/**
 * cdns_uart_probe - Platform driver probe
 * @pdev: Pointer to the platform device structure
 *
 * Return: 0 on success, negative errno otherwise
 */
static int cdns_uart_probe(struct platform_device *pdev)
{
	int rc, id, irq;
	struct uart_port *port;
	struct resource *res;
	struct cdns_uart *cdns_uart_data;
	const struct of_device_id *match;

	cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
			GFP_KERNEL);
	if (!cdns_uart_data)
		return -ENOMEM;

	/* Pick up SoC-specific quirks from the matched OF table entry. */
	match = of_match_node(cdns_uart_of_match, pdev->dev.of_node);
	if (match && match->data) {
		const struct cdns_platform_data *data = match->data;

		cdns_uart_data->quirks = data->quirks;
	}

	/* "aper_clk" is the deprecated DT name for the peripheral clock. */
	cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(cdns_uart_data->pclk)) {
		cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "aper_clk");
		if (!IS_ERR(cdns_uart_data->pclk))
			dev_err(&pdev->dev, "clock name 'aper_clk' is deprecated.\n");
	}
	if (IS_ERR(cdns_uart_data->pclk)) {
		dev_err(&pdev->dev, "pclk clock not found.\n");
		return PTR_ERR(cdns_uart_data->pclk);
	}

	/* "ref_clk" is the deprecated DT name for the UART reference clock. */
	cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "uart_clk");
	if (IS_ERR(cdns_uart_data->uartclk)) {
		cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "ref_clk");
		if (!IS_ERR(cdns_uart_data->uartclk))
			dev_err(&pdev->dev, "clock name 'ref_clk' is deprecated.\n");
	}
	if (IS_ERR(cdns_uart_data->uartclk)) {
		dev_err(&pdev->dev, "uart_clk clock not found.\n");
		return PTR_ERR(cdns_uart_data->uartclk);
	}

	rc = clk_prepare(cdns_uart_data->pclk);
	if (rc) {
		dev_err(&pdev->dev, "Unable to enable pclk clock.\n");
		return rc;
	}
	rc = clk_prepare(cdns_uart_data->uartclk);
	if (rc) {
		dev_err(&pdev->dev, "Unable to enable device clock.\n");
		goto err_out_clk_dis_pclk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		rc = -ENODEV;
		goto err_out_clk_disable;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		rc = -ENXIO;
		goto err_out_clk_disable;
	}

#ifdef CONFIG_COMMON_CLK
	/* Follow rate changes of the UART clock so baud divisors stay valid. */
	cdns_uart_data->clk_rate_change_nb.notifier_call =
			cdns_uart_clk_notifier_cb;
	if (clk_notifier_register(cdns_uart_data->uartclk,
				&cdns_uart_data->clk_rate_change_nb))
		dev_warn(&pdev->dev, "Unable to register clock notifier.\n");
#endif

	/* Look for a serialN alias */
	id = of_alias_get_id(pdev->dev.of_node, "serial");
	if (id < 0)
		id = 0;

	/* Initialize the port structure */
	port = cdns_uart_get_port(id);
	if (!port) {
		dev_err(&pdev->dev, "Cannot get uart_port structure\n");
		rc = -ENODEV;
		goto err_out_notif_unreg;
	} else {
		/* Register the port.
		 * This function also registers this device with the tty layer
		 * and triggers invocation of the config_port() entry point.
		 */
		port->mapbase = res->start;
		port->irq = irq;
		port->dev = &pdev->dev;
		port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
		port->private_data = cdns_uart_data;
		cdns_uart_data->port = port;
		platform_set_drvdata(pdev, port);

		rc = uart_add_one_port(&cdns_uart_uart_driver, port);
		if (rc) {
			dev_err(&pdev->dev,
				"uart_add_one_port() failed; err=%i\n", rc);
			goto err_out_notif_unreg;
		}
		return 0;
	}

err_out_notif_unreg:
#ifdef CONFIG_COMMON_CLK
	clk_notifier_unregister(cdns_uart_data->uartclk,
			&cdns_uart_data->clk_rate_change_nb);
#endif
err_out_clk_disable:
	clk_unprepare(cdns_uart_data->uartclk);
err_out_clk_dis_pclk:
	clk_unprepare(cdns_uart_data->pclk);

	return rc;
}
/*
 * fimc_lite_probe - FIMC-LITE platform driver probe.
 *
 * Maps the device registers, acquires clocks and the IRQ, registers the
 * capture subdev and sets up runtime PM.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int fimc_lite_probe(struct platform_device *pdev)
{
	struct flite_drvdata *drv_data = NULL;
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	struct fimc_lite *fimc;
	struct resource *res;
	int ret;

	/* This driver is DT-only. */
	if (!dev->of_node)
		return -ENODEV;

	fimc = devm_kzalloc(dev, sizeof(*fimc), GFP_KERNEL);
	if (!fimc)
		return -ENOMEM;

	of_id = of_match_node(flite_of_match, dev->of_node);
	if (of_id)
		drv_data = (struct flite_drvdata *)of_id->data;

	/* The "fimc-lite" alias selects which hardware instance this is. */
	fimc->index = of_alias_get_id(dev->of_node, "fimc-lite");

	if (!drv_data || fimc->index >= drv_data->num_instances ||
						fimc->index < 0) {
		dev_err(dev, "Wrong %s node alias\n",
			dev->of_node->full_name);
		return -EINVAL;
	}

	fimc->dd = drv_data;
	fimc->pdev = pdev;

	init_waitqueue_head(&fimc->irq_queue);
	spin_lock_init(&fimc->slock);
	mutex_init(&fimc->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fimc->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(fimc->regs))
		return PTR_ERR(fimc->regs);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(dev, "Failed to get IRQ resource\n");
		return -ENXIO;
	}

	ret = fimc_lite_clk_get(fimc);
	if (ret)
		return ret;

	ret = devm_request_irq(dev, res->start, flite_irq_handler,
			       0, dev_name(dev), fimc);
	if (ret) {
		dev_err(dev, "Failed to install irq (%d)\n", ret);
		goto err_clk_put;
	}

	/* The video node will be created within the subdev's registered() op */
	ret = fimc_lite_create_capture_subdev(fimc);
	if (ret)
		goto err_clk_put;

	platform_set_drvdata(pdev, fimc);
	pm_runtime_enable(dev);

	/* Without runtime PM the clock must be kept enabled manually. */
	if (!pm_runtime_enabled(dev)) {
		ret = clk_enable(fimc->clock);
		if (ret < 0)
			goto err_sd;
	}

	fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
	if (IS_ERR(fimc->alloc_ctx)) {
		ret = PTR_ERR(fimc->alloc_ctx);
		goto err_clk_dis;
	}

	fimc_lite_set_default_config(fimc);

	dev_dbg(dev, "FIMC-LITE.%d registered successfully\n",
		fimc->index);
	return 0;

err_clk_dis:
	if (!pm_runtime_enabled(dev))
		clk_disable(fimc->clock);
err_sd:
	fimc_lite_unregister_capture_subdev(fimc);
err_clk_put:
	fimc_lite_clk_put(fimc);
	return ret;
}
static int g2d_probe(struct platform_device *pdev) { struct g2d_dev *dev; struct video_device *vfd; struct resource *res; const struct of_device_id *of_id; int ret = 0; dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_init(&dev->ctrl_lock); mutex_init(&dev->mutex); atomic_set(&dev->num_inst, 0); init_waitqueue_head(&dev->irq_queue); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dev->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dev->regs)) return PTR_ERR(dev->regs); dev->clk = clk_get(&pdev->dev, "sclk_fimg2d"); if (IS_ERR(dev->clk)) { dev_err(&pdev->dev, "failed to get g2d clock\n"); return -ENXIO; } ret = clk_prepare(dev->clk); if (ret) { dev_err(&pdev->dev, "failed to prepare g2d clock\n"); goto put_clk; } dev->gate = clk_get(&pdev->dev, "fimg2d"); if (IS_ERR(dev->gate)) { dev_err(&pdev->dev, "failed to get g2d clock gate\n"); ret = -ENXIO; goto unprep_clk; } ret = clk_prepare(dev->gate); if (ret) { dev_err(&pdev->dev, "failed to prepare g2d clock gate\n"); goto put_clk_gate; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "failed to find IRQ\n"); ret = -ENXIO; goto unprep_clk_gate; } dev->irq = res->start; ret = devm_request_irq(&pdev->dev, dev->irq, g2d_isr, 0, pdev->name, dev); if (ret) { dev_err(&pdev->dev, "failed to install IRQ\n"); goto put_clk_gate; } dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(dev->alloc_ctx)) { ret = PTR_ERR(dev->alloc_ctx); goto unprep_clk_gate; } ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) goto alloc_ctx_cleanup; vfd = video_device_alloc(); if (!vfd) { v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n"); ret = -ENOMEM; goto unreg_v4l2_dev; } *vfd = g2d_videodev; vfd->lock = &dev->mutex; vfd->v4l2_dev = &dev->v4l2_dev; ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto rel_vdev; } 
video_set_drvdata(vfd, dev); snprintf(vfd->name, sizeof(vfd->name), "%s", g2d_videodev.name); dev->vfd = vfd; v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n", vfd->num); platform_set_drvdata(pdev, dev); dev->m2m_dev = v4l2_m2m_init(&g2d_m2m_ops); if (IS_ERR(dev->m2m_dev)) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(dev->m2m_dev); goto unreg_video_dev; } def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3; if (!pdev->dev.of_node) { dev->variant = g2d_get_drv_data(pdev); } else { of_id = of_match_node(exynos_g2d_match, pdev->dev.of_node); if (!of_id) { ret = -ENODEV; goto unreg_video_dev; } dev->variant = (struct g2d_variant *)of_id->data; } return 0; unreg_video_dev: video_unregister_device(dev->vfd); rel_vdev: video_device_release(vfd); unreg_v4l2_dev: v4l2_device_unregister(&dev->v4l2_dev); alloc_ctx_cleanup: vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); unprep_clk_gate: clk_unprepare(dev->gate); put_clk_gate: clk_put(dev->gate); unprep_clk: clk_unprepare(dev->clk); put_clk: clk_put(dev->clk); return ret; }
/* Tail of the preceding function (its body starts before this chunk). */
return 0; }

/*
 * get_mdp_ver - determine the MDP hardware generation for this device.
 *
 * Return: 5 when the DT node matches "qcom,mdss_mdp", otherwise 4
 * (also the fallback for non-DT builds).
 */
static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
	static const struct of_device_id match_types[] = { {
		.compatible = "qcom,mdss_mdp",
		.data = (void *)5,
	}, {
		/* end node */
	} };
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;

	match = of_match_node(match_types, dev->of_node);
	if (match)
		return (int)(unsigned long)match->data;
#endif
	return 4;
}

#include <linux/of_address.h>

/* msm_init_vram continues past this chunk; only its head is visible here. */
static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	unsigned long size = 0;
	int ret = 0;

#ifdef CONFIG_OF
static int balong_sim_probe(struct platform_device *pdev) { struct balong_sim_plat_data *plat = NULL; const struct of_device_id *match; enum of_gpio_flags gpio_flags; int err; dev_dbg(&pdev->dev, "balong_sim_probe\n"); if ((match = of_match_node(balong_sim_match, pdev->dev.of_node)) == NULL) { dev_err(&pdev->dev, "dev node is not match. exiting.\n"); return -ENODEV; } plat = (struct balong_sim_plat_data *)match->data; if (plat == NULL) { dev_err(&pdev->dev, "no platform data\n"); return -EINVAL; } pr_err("balong_sim_probe, enter %s\n",plat->name); if (!strcmp(SIM1 , plat->name)) { sim1_plat_data = plat; sim1_plat_data->sim_detection_gpio = of_get_gpio_by_prop(pdev->dev.of_node,"gpio-sim1_detect,gpio-irq",0,0, &gpio_flags); err = of_property_read_u32(pdev->dev.of_node, "gpio_sim1_in_val", &g_sim1_present_electrical_lvl); if (err < 0) { dev_err(&pdev->dev, "sim1 obtain the ele lvl failed err = %d\n",err); return -EINVAL; } } if (!strcmp(SIM2 , plat->name)) { sim2_plat_data = plat; sim2_plat_data->sim_detection_gpio = of_get_gpio_by_prop(pdev->dev.of_node,"gpio-sim2_detect,gpio-irq",0,0, &gpio_flags); err = of_property_read_u32(pdev->dev.of_node, "gpio_sim2_in_val", &g_sim2_present_electrical_lvl); if (err < 0) { dev_err(&pdev->dev, "sim2 obtain the ele lvl failed err = %d\n",err); return -EINVAL; } } err = gpio_request_one(plat->sim_detection_gpio, GPIOF_IN, plat->name); if (err) { dev_warn(&pdev->dev, "no sim-detect pin available!\n"); return err; } /*Initialize sim status when booting*/ if (!strcmp(SIM1 , plat->name)) { plat->sim_status = get_sim_status(plat->sim_detection_gpio, g_sim1_present_electrical_lvl); } if (!strcmp(SIM2 , plat->name)) { plat->sim_status = get_sim_status(plat->sim_detection_gpio, g_sim2_present_electrical_lvl); } /*以下代码只执行一次,SIM1和SIM2共用一个任务队列*/ if (!workqueue) { /* Initialize works */ workqueue = create_singlethread_workqueue("balong_sim_workqueue"); if (!workqueue) { dev_err(&pdev->dev, "Create workqueue failed\n"); err = -1; goto 
err_init_workqueue; } else { pr_info("balong_sim_probe, Initialization of workqueue succeed\n"); } } /*request SIM irq*/ if (!strcmp(SIM1 , plat->name)) { /*request irq for sim1*/ INIT_WORK(&sim1_irq_work, balong_sim1_set_status); err = request_irq(gpio_to_irq(sim1_plat_data->sim_detection_gpio), sim1_detection_irq_handler, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, pdev->name, pdev); if (err) { dev_warn(&pdev->dev, "sim1:request gpio irq error\n"); goto no_sim_detect_irq; } /* sysfs entries for IO control */ err = device_create_file(&(pdev->dev), &dev_attr_sim1_status); if (err) { dev_err(&pdev->dev, "sim1:Failed to create sysfs entry\n"); goto err_create_device_file; } } if (!strcmp(SIM2 , plat->name)) { INIT_WORK(&sim2_irq_work, balong_sim2_set_status); err = request_irq(gpio_to_irq(sim2_plat_data->sim_detection_gpio), sim2_detection_irq_handler, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, pdev->name, pdev); if (err) { dev_warn(&pdev->dev, "sim2:request gpio irq error\n"); goto no_sim_detect_irq; } /* sysfs entries for IO control */ err = device_create_file(&(pdev->dev), &dev_attr_sim2_status); if (err) { dev_err(&pdev->dev, "sim2:Failed to create sysfs entry\n"); goto err_create_device_file; } } return 0; err_create_device_file: no_sim_detect_irq: destroy_workqueue(workqueue); workqueue = NULL; err_init_workqueue: gpio_free(plat->sim_detection_gpio); return err; }
/*
 * tsens_probe - QCOM TSENS temperature sensor driver probe.
 *
 * Reads per-sensor slopes from DT, selects the SoC-specific ops via the
 * OF match table (falling back to the 8960 ops), then runs the init and
 * calibration hooks and registers the thermal device.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int tsens_probe(struct platform_device *pdev)
{
	int ret, i, num;
	struct device *dev;
	struct device_node *np;
	struct tsens_sensor *s;
	struct tsens_device *tmdev;
	const struct of_device_id *id;

	/*
	 * The driver may be instantiated as a child device; in that case
	 * the OF node lives on the parent.
	 */
	if (pdev->dev.of_node)
		dev = &pdev->dev;
	else
		dev = pdev->dev.parent;

	np = dev->of_node;

	num = of_property_count_u32_elems(np, "qcom,tsens-slopes");
	if (num <= 0) {
		dev_err(dev, "invalid tsens slopes\n");
		return -EINVAL;
	}

	/* One trailing struct tsens_sensor per slope entry. */
	tmdev = devm_kzalloc(dev, sizeof(*tmdev) + num * sizeof(*s),
			     GFP_KERNEL);
	if (!tmdev)
		return -ENOMEM;

	tmdev->dev = dev;
	tmdev->num_sensors = num;

	for (i = 0, s = tmdev->sensor; i < tmdev->num_sensors; i++, s++)
		of_property_read_u32_index(np, "qcom,tsens-slopes", i,
					   &s->slope);

	/* Fall back to the 8960 ops when the table has no match. */
	id = of_match_node(tsens_table, np);
	if (id)
		tmdev->ops = id->data;
	else
		tmdev->ops = &ops_8960;

	if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->calibrate ||
	    !tmdev->ops->get_temp)
		return -EINVAL;

	ret = tmdev->ops->init(tmdev);
	if (ret < 0) {
		dev_err(dev, "tsens init failed\n");
		return ret;
	}

	ret = tmdev->ops->calibrate(tmdev);
	if (ret < 0) {
		dev_err(dev, "tsens calibration failed\n");
		return ret;
	}

	ret = tsens_register(tmdev);

	platform_set_drvdata(pdev, tmdev);

	return ret;
}
/*
 * hdmi_bind - component bind callback for the MSM HDMI block.
 *
 * Builds (or selects) the hdmi_platform_config for this SoC — from the OF
 * match data on DT platforms, or from a cpu_is_*()-selected static table
 * otherwise — stores it in dev->platform_data, initializes the HDMI core
 * and registers the HDMI audio driver.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct msm_drm_private *priv = drm->dev_private;
	/* static: the selected config must outlive this bind() call. */
	static struct hdmi_platform_config *hdmi_cfg;
	struct hdmi *hdmi;
#ifdef CONFIG_OF
	struct device_node *of_node = dev->of_node;
	const struct of_device_id *match;

	match = of_match_node(dt_match, of_node);
	if (match && match->data) {
		hdmi_cfg = (struct hdmi_platform_config *)match->data;
		DBG("hdmi phy: %s", match->compatible);
	} else {
		dev_err(dev, "unknown phy: %s\n", of_node->name);
		return -ENXIO;
	}

	/*
	 * NOTE(review): this writes through the match-table data pointer,
	 * modifying the shared of_device_id entry in place.
	 */
	hdmi_cfg->mmio_name = "core_physical";
	hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
	hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
	hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
	hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
	hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
	hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
	hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
#else
	static struct hdmi_platform_config config = {};
	static const char *hpd_clk_names[] = {
		"core_clk", "master_iface_clk", "slave_iface_clk",
	};

	/* Legacy (non-DT) boards: hard-coded GPIO/regulator tables per SoC. */
	if (cpu_is_apq8064()) {
		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
		config.phy_init = hdmi_phy_8960_init;
		config.hpd_reg_names = hpd_reg_names;
		config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
		config.hpd_clk_names = hpd_clk_names;
		config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
		config.ddc_clk_gpio = 70;
		config.ddc_data_gpio = 71;
		config.hpd_gpio = 72;
		config.mux_en_gpio = -1;
		config.mux_sel_gpio = -1;
	} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
		config.phy_init = hdmi_phy_8960_init;
		config.hpd_reg_names = hpd_reg_names;
		config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
		config.hpd_clk_names = hpd_clk_names;
		config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
		config.ddc_clk_gpio = 100;
		config.ddc_data_gpio = 101;
		config.hpd_gpio = 102;
		config.mux_en_gpio = -1;
		config.mux_sel_gpio = -1;
	} else if (cpu_is_msm8x60()) {
		static const char *hpd_reg_names[] = {
			"8901_hdmi_mvs", "8901_mpp0"
		};
		config.phy_init = hdmi_phy_8x60_init;
		config.hpd_reg_names = hpd_reg_names;
		config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
		config.hpd_clk_names = hpd_clk_names;
		config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
		config.ddc_clk_gpio = 170;
		config.ddc_data_gpio = 171;
		config.hpd_gpio = 172;
		config.mux_en_gpio = -1;
		config.mux_sel_gpio = -1;
	}
	config.mmio_name = "hdmi_msm_hdmi_addr";
	config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";
	hdmi_cfg = &config;
#endif
	dev->platform_data = hdmi_cfg;

	hdmi = hdmi_init(to_platform_device(dev));
	if (IS_ERR(hdmi))
		return PTR_ERR(hdmi);
	priv->hdmi = hdmi;

	msm_hdmi_register_audio_driver(hdmi, dev);

	return 0;
}
static int ti_dra7_xbar_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; const struct of_device_id *match; struct device_node *dma_node; struct ti_dra7_xbar_data *xbar; struct property *prop; struct resource *res; u32 safe_val; int sz; void __iomem *iomem; int i, ret; if (!node) return -ENODEV; xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL); if (!xbar) return -ENOMEM; dma_node = of_parse_phandle(node, "dma-masters", 0); if (!dma_node) { dev_err(&pdev->dev, "Can't get DMA master node\n"); return -ENODEV; } match = of_match_node(ti_dra7_master_match, dma_node); if (!match) { dev_err(&pdev->dev, "DMA master is not supported\n"); of_node_put(dma_node); return -EINVAL; } if (of_property_read_u32(dma_node, "dma-requests", &xbar->dma_requests)) { dev_info(&pdev->dev, "Missing XBAR output information, using %u.\n", TI_DRA7_XBAR_OUTPUTS); xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS; } of_node_put(dma_node); xbar->dma_inuse = devm_kcalloc(&pdev->dev, BITS_TO_LONGS(xbar->dma_requests), sizeof(unsigned long), GFP_KERNEL); if (!xbar->dma_inuse) return -ENOMEM; if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) { dev_info(&pdev->dev, "Missing XBAR input information, using %u.\n", TI_DRA7_XBAR_INPUTS); xbar->xbar_requests = TI_DRA7_XBAR_INPUTS; } if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val)) xbar->safe_val = (u16)safe_val; prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz); if (prop) { const char pname[] = "ti,reserved-dma-request-ranges"; u32 (*rsv_events)[2]; size_t nelm = sz / sizeof(*rsv_events); int i; if (!nelm) return -EINVAL; rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL); if (!rsv_events) return -ENOMEM; ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events, nelm * 2); if (ret) return ret; for (i = 0; i < nelm; i++) { ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1], xbar->dma_inuse); } kfree(rsv_events); } res = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); iomem = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(iomem)) return PTR_ERR(iomem); xbar->iomem = iomem; xbar->dmarouter.dev = &pdev->dev; xbar->dmarouter.route_free = ti_dra7_xbar_free; xbar->dma_offset = *(u32 *)match->data; mutex_init(&xbar->mutex); platform_set_drvdata(pdev, xbar); /* Reset the crossbar */ for (i = 0; i < xbar->dma_requests; i++) { if (!test_bit(i, xbar->dma_inuse)) ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val); } ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate, &xbar->dmarouter); if (ret) { /* Restore the defaults for the crossbar */ for (i = 0; i < xbar->dma_requests; i++) { if (!test_bit(i, xbar->dma_inuse)) ti_dra7_xbar_write(xbar->iomem, i, i); } } return ret; }
/*
 * sirfsoc_uart_probe - CSR SiRFSoC UART/USP platform driver probe.
 *
 * Resolves the register layout from the OF match, configures optional
 * GPIO-based flow control for USP-mode UARTs, maps the registers, adds
 * the port to the serial core and sets up optional RX/TX DMA.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int sirfsoc_uart_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sirfsoc_uart_port *sirfport;
	struct uart_port *port;
	struct resource *res;
	int ret;
	struct dma_slave_config slv_cfg = {
		.src_maxburst = 1,
	};
	struct dma_slave_config tx_slv_cfg = {
		.dst_maxburst = 2,
	};
	const struct of_device_id *match;

	match = of_match_node(sirfsoc_uart_ids, np);
	sirfport = devm_kzalloc(&pdev->dev, sizeof(*sirfport), GFP_KERNEL);
	if (!sirfport) {
		ret = -ENOMEM;
		goto err;
	}
	/*
	 * NOTE(review): of_alias_get_id() can return a negative errno, which
	 * would index sirf_ports[] out of bounds below — confirm aliases are
	 * guaranteed by the DT, or add a check.
	 */
	sirfport->port.line = of_alias_get_id(np, "serial");
	sirf_ports[sirfport->port.line] = sirfport;
	sirfport->port.iotype = UPIO_MEM;
	sirfport->port.flags = UPF_BOOT_AUTOCONF;
	port = &sirfport->port;
	port->dev = &pdev->dev;
	port->private_data = sirfport;
	sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;

	sirfport->hw_flow_ctrl =
		of_property_read_bool(np, "uart-has-rtscts") ||
		of_property_read_bool(np, "sirf,uart-has-rtscts") /* deprecated */;
	if (of_device_is_compatible(np, "sirf,prima2-uart") ||
	    of_device_is_compatible(np, "sirf,atlas7-uart"))
		sirfport->uart_reg->uart_type = SIRF_REAL_UART;
	if (of_device_is_compatible(np, "sirf,prima2-usp-uart") ||
	    of_device_is_compatible(np, "sirf,atlas7-usp-uart")) {
		sirfport->uart_reg->uart_type = SIRF_USP_UART;
		if (!sirfport->hw_flow_ctrl)
			goto usp_no_flow_control;
		/* USP mode implements RTS/CTS with a pair of GPIOs. */
		if (of_find_property(np, "cts-gpios", NULL))
			sirfport->cts_gpio =
				of_get_named_gpio(np, "cts-gpios", 0);
		else
			sirfport->cts_gpio = -1;
		if (of_find_property(np, "rts-gpios", NULL))
			sirfport->rts_gpio =
				of_get_named_gpio(np, "rts-gpios", 0);
		else
			sirfport->rts_gpio = -1;
		if ((!gpio_is_valid(sirfport->cts_gpio) ||
		     !gpio_is_valid(sirfport->rts_gpio))) {
			ret = -EINVAL;
			dev_err(&pdev->dev,
				"Usp flow control must have cts and rts gpio");
			goto err;
		}
		ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
					"usp-cts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable request cts gpio");
			goto err;
		}
		gpio_direction_input(sirfport->cts_gpio);
		ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
					"usp-rts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable request rts gpio");
			goto err;
		}
		gpio_direction_output(sirfport->rts_gpio, 1);
	}
usp_no_flow_control:
	if (of_device_is_compatible(np, "sirf,atlas7-uart") ||
	    of_device_is_compatible(np, "sirf,atlas7-usp-uart"))
		sirfport->is_atlas7 = true;

	if (of_property_read_u32(np, "fifosize", &port->fifosize)) {
		dev_err(&pdev->dev, "Unable to find fifosize in uart node.\n");
		ret = -EFAULT;
		goto err;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	port->mapbase = res->start;
	port->membase = devm_ioremap(&pdev->dev,
			res->start, resource_size(res));
	if (!port->membase) {
		dev_err(&pdev->dev, "Cannot remap resource.\n");
		ret = -ENOMEM;
		goto err;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	port->irq = res->start;

	sirfport->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sirfport->clk)) {
		ret = PTR_ERR(sirfport->clk);
		goto err;
	}
	port->uartclk = clk_get_rate(sirfport->clk);

	port->ops = &sirfsoc_uart_ops;
	spin_lock_init(&port->lock);

	platform_set_drvdata(pdev, sirfport);
	ret = uart_add_one_port(&sirfsoc_uart_drv, port);
	if (ret != 0) {
		dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
		goto err;
	}

	sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
	sirfport->rx_dma_items.xmit.buf =
		dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
				   &sirfport->rx_dma_items.dma_addr,
				   GFP_KERNEL);
	if (!sirfport->rx_dma_items.xmit.buf) {
		dev_err(port->dev, "Uart alloc bufa failed\n");
		ret = -ENOMEM;
		goto alloc_coherent_err;
	}
	sirfport->rx_dma_items.xmit.head =
		sirfport->rx_dma_items.xmit.tail = 0;
	if (sirfport->rx_dma_chan)
		dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
	sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
	if (sirfport->tx_dma_chan)
		dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
	if (sirfport->rx_dma_chan) {
		/* RX DMA completion is flushed from a hrtimer callback. */
		hrtimer_init(&sirfport->hrt, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		sirfport->hrt.function = sirfsoc_uart_rx_dma_hrtimer_callback;
		sirfport->is_hrt_enabled = false;
	}

	return 0;
alloc_coherent_err:
	/*
	 * NOTE(review): on this path xmit.buf is NULL and rx_dma_chan may be
	 * NULL as well; dma_free_coherent()/dma_release_channel() are called
	 * with those values unconditionally — confirm this is safe here.
	 */
	dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
			  sirfport->rx_dma_items.xmit.buf,
			  sirfport->rx_dma_items.dma_addr);
	dma_release_channel(sirfport->rx_dma_chan);
err:
	return ret;
}

/* Tear down one UART port: serial core, DMA channels, coherent RX buffer. */
static int sirfsoc_uart_remove(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;

	uart_remove_one_port(&sirfsoc_uart_drv, port);
	if (sirfport->rx_dma_chan) {
		dmaengine_terminate_all(sirfport->rx_dma_chan);
		dma_release_channel(sirfport->rx_dma_chan);
		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
				  sirfport->rx_dma_items.xmit.buf,
				  sirfport->rx_dma_items.dma_addr);
	}
	if (sirfport->tx_dma_chan) {
		dmaengine_terminate_all(sirfport->tx_dma_chan);
		dma_release_channel(sirfport->tx_dma_chan);
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* System sleep: delegate suspend/resume to the serial core. */
static int sirfsoc_uart_suspend(struct device *pdev)
{
	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;

	uart_suspend_port(&sirfsoc_uart_drv, port);
	return 0;
}

static int sirfsoc_uart_resume(struct device *pdev)
{
	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;

	uart_resume_port(&sirfsoc_uart_drv, port);
	return 0;
}
#endif

static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
};

static struct platform_driver sirfsoc_uart_driver = {
	.probe = sirfsoc_uart_probe,
	.remove = sirfsoc_uart_remove,
	.driver = {
		.name = SIRFUART_PORT_NAME,
		.of_match_table = sirfsoc_uart_ids,
		.pm = &sirfsoc_uart_pm_ops,
	},
};

/* Register the serial core driver first, then the platform driver. */
static int __init sirfsoc_uart_init(void)
{
	int ret = 0;

	ret = uart_register_driver(&sirfsoc_uart_drv);
	if (ret)
		goto out;

	ret = platform_driver_register(&sirfsoc_uart_driver);
	if (ret)
		uart_unregister_driver(&sirfsoc_uart_drv);
out:
	return ret;
}
module_init(sirfsoc_uart_init);

static void __exit sirfsoc_uart_exit(void)
{
	platform_driver_unregister(&sirfsoc_uart_driver);
	uart_unregister_driver(&sirfsoc_uart_drv);
}
/*
 * rockchip_thermal_probe - Rockchip TSADC thermal driver probe.
 *
 * Selects the SoC-specific chip description from the OF match, maps the
 * registers, enables the converter and APB clocks, resets the controller,
 * registers the CPU and GPU thermal sensors and arms the alarm interrupt.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int rockchip_thermal_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct rockchip_thermal_data *thermal;
	const struct of_device_id *match;
	struct resource *res;
	int irq;
	int i;
	int error;

	match = of_match_node(of_rockchip_thermal_match, np);
	if (!match)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -EINVAL;
	}

	thermal = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_thermal_data),
			       GFP_KERNEL);
	if (!thermal)
		return -ENOMEM;

	thermal->pdev = pdev;

	thermal->chip = (const struct rockchip_tsadc_chip *)match->data;
	if (!thermal->chip)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	thermal->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(thermal->regs))
		return PTR_ERR(thermal->regs);

	thermal->reset = devm_reset_control_get(&pdev->dev, "tsadc-apb");
	if (IS_ERR(thermal->reset)) {
		error = PTR_ERR(thermal->reset);
		dev_err(&pdev->dev, "failed to get tsadc reset: %d\n", error);
		return error;
	}

	thermal->clk = devm_clk_get(&pdev->dev, "tsadc");
	if (IS_ERR(thermal->clk)) {
		error = PTR_ERR(thermal->clk);
		dev_err(&pdev->dev, "failed to get tsadc clock: %d\n", error);
		return error;
	}

	thermal->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(thermal->pclk)) {
		error = PTR_ERR(thermal->pclk);
		dev_err(&pdev->dev, "failed to get apb_pclk clock: %d\n",
			error);
		return error;
	}

	error = clk_prepare_enable(thermal->clk);
	if (error) {
		dev_err(&pdev->dev, "failed to enable converter clock: %d\n",
			error);
		return error;
	}

	error = clk_prepare_enable(thermal->pclk);
	if (error) {
		dev_err(&pdev->dev, "failed to enable pclk: %d\n", error);
		goto err_disable_clk;
	}

	rockchip_thermal_reset_controller(thermal->reset);

	error = rockchip_configure_from_dt(&pdev->dev, np, thermal);
	if (error) {
		dev_err(&pdev->dev, "failed to parse device tree data: %d\n",
			error);
		goto err_disable_pclk;
	}

	thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);

	error = rockchip_thermal_register_sensor(pdev, thermal,
						 &thermal->sensors[0],
						 SENSOR_CPU);
	if (error) {
		dev_err(&pdev->dev,
			"failed to register CPU thermal sensor: %d\n", error);
		goto err_disable_pclk;
	}

	error = rockchip_thermal_register_sensor(pdev, thermal,
						 &thermal->sensors[1],
						 SENSOR_GPU);
	if (error) {
		dev_err(&pdev->dev,
			"failed to register GPU thermal sensor: %d\n", error);
		goto err_unregister_cpu_sensor;
	}

	/* IRQF_ONESHOT: the threaded handler re-arms the alarm itself. */
	error = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					  &rockchip_thermal_alarm_irq_thread,
					  IRQF_ONESHOT,
					  "rockchip_thermal", thermal);
	if (error) {
		dev_err(&pdev->dev,
			"failed to request tsadc irq: %d\n", error);
		goto err_unregister_gpu_sensor;
	}

	thermal->chip->control(thermal->regs, true);

	for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++)
		rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);

	platform_set_drvdata(pdev, thermal);

	return 0;

err_unregister_gpu_sensor:
	thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd);
err_unregister_cpu_sensor:
	thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd);
err_disable_pclk:
	clk_disable_unprepare(thermal->pclk);
err_disable_clk:
	clk_disable_unprepare(thermal->clk);

	return error;
}
static int ti_am335x_xbar_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; const struct of_device_id *match; struct device_node *dma_node; struct ti_am335x_xbar_data *xbar; struct resource *res; void __iomem *iomem; int i, ret; if (!node) return -ENODEV; xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL); if (!xbar) return -ENOMEM; dma_node = of_parse_phandle(node, "dma-masters", 0); if (!dma_node) { dev_err(&pdev->dev, "Can't get DMA master node\n"); return -ENODEV; } match = of_match_node(ti_am335x_master_match, dma_node); if (!match) { dev_err(&pdev->dev, "DMA master is not supported\n"); of_node_put(dma_node); return -EINVAL; } if (of_property_read_u32(dma_node, "dma-requests", &xbar->dma_requests)) { dev_info(&pdev->dev, "Missing XBAR output information, using %u.\n", TI_AM335X_XBAR_LINES); xbar->dma_requests = TI_AM335X_XBAR_LINES; } of_node_put(dma_node); if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) { dev_info(&pdev->dev, "Missing XBAR input information, using %u.\n", TI_AM335X_XBAR_LINES); xbar->xbar_events = TI_AM335X_XBAR_LINES; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); iomem = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(iomem)) return PTR_ERR(iomem); xbar->iomem = iomem; xbar->dmarouter.dev = &pdev->dev; xbar->dmarouter.route_free = ti_am335x_xbar_free; platform_set_drvdata(pdev, xbar); /* Reset the crossbar */ for (i = 0; i < xbar->dma_requests; i++) ti_am335x_xbar_write(xbar->iomem, i, 0); ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate, &xbar->dmarouter); return ret; }
/*
 * dsps_probe - TI DSPS MUSB glue layer probe.
 *
 * Looks up the wrapper register description from the OF match, allocates
 * the glue structure with a private copy of the wrapper data, enables the
 * USBSS clocks via runtime PM and creates one child MUSB platform device
 * per instance.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int dsps_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *match;
	const struct dsps_musb_wrapper *wrp;
	struct dsps_glue *glue;
	struct resource *iomem;
	int ret, i;

	match = of_match_node(musb_dsps_of_match, np);
	if (!match) {
		dev_err(&pdev->dev, "fail to get matching of_match struct\n");
		ret = -EINVAL;
		goto err0;
	}
	wrp = match->data;

	/* allocate glue */
	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
	if (!glue) {
		dev_err(&pdev->dev, "unable to allocate glue memory\n");
		ret = -ENOMEM;
		goto err0;
	}

	/* get memory resource */
	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem) {
		dev_err(&pdev->dev, "failed to get usbss mem resourse\n");
		ret = -ENODEV;
		goto err1;
	}

	glue->dev = &pdev->dev;

	/* Private copy so per-instance state cannot clobber the match data. */
	glue->wrp = kmemdup(wrp, sizeof(*wrp), GFP_KERNEL);
	if (!glue->wrp) {
		dev_err(&pdev->dev, "failed to duplicate wrapper struct memory\n");
		ret = -ENOMEM;
		goto err1;
	}
	platform_set_drvdata(pdev, glue);

	/* enable the usbss clocks */
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
		goto err2;
	}

	/* create the child platform device for all instances of musb */
	for (i = 0; i < wrp->instances ; i++) {
		ret = dsps_create_musb_pdev(glue, i);
		if (ret != 0) {
			dev_err(&pdev->dev, "failed to create child pdev\n");
			/* release resources of previously created instances */
			for (i--; i >= 0 ; i--)
				platform_device_unregister(glue->musb[i]);
			goto err3;
		}
	}

	return 0;

err3:
	pm_runtime_put(&pdev->dev);
err2:
	pm_runtime_disable(&pdev->dev);
	kfree(glue->wrp);
err1:
	kfree(glue);
err0:
	return ret;
}
/*
 * atmel_hlcdc_dc_load - DRM driver load callback for the Atmel HLCDC.
 *
 * Matches the HLCDC description on the MFD parent's OF node, sets up the
 * ordered commit workqueue, enables the peripheral clock and runtime PM,
 * initializes vblank, mode setting and the IRQ handler, then starts
 * connector polling.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int atmel_hlcdc_dc_load(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	const struct of_device_id *match;
	struct atmel_hlcdc_dc *dc;
	int ret;

	/* The compatible string lives on the HLCDC MFD parent node. */
	match = of_match_node(atmel_hlcdc_of_match, dev->dev->parent->of_node);
	if (!match) {
		dev_err(&pdev->dev, "invalid compatible string\n");
		return -ENODEV;
	}

	if (!match->data) {
		dev_err(&pdev->dev, "invalid hlcdc description\n");
		return -EINVAL;
	}

	dc = devm_kzalloc(dev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	/* Ordered queue: commits must be applied in submission order. */
	dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0);
	if (!dc->wq)
		return -ENOMEM;

	init_waitqueue_head(&dc->commit.wait);
	dc->desc = match->data;
	dc->hlcdc = dev_get_drvdata(dev->dev->parent);
	dev->dev_private = dc;

	ret = clk_prepare_enable(dc->hlcdc->periph_clk);
	if (ret) {
		dev_err(dev->dev, "failed to enable periph_clk\n");
		goto err_destroy_wq;
	}

	pm_runtime_enable(dev->dev);

	ret = drm_vblank_init(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto err_periph_clk_disable;
	}

	ret = atmel_hlcdc_dc_modeset_init(dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize mode setting\n");
		goto err_periph_clk_disable;
	}

	drm_mode_config_reset(dev);

	/* The device must be powered while the IRQ handler is installed. */
	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev, dc->hlcdc->irq);
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto err_periph_clk_disable;
	}

	platform_set_drvdata(pdev, dev);

	drm_kms_helper_poll_init(dev);

	/* force connectors detection */
	drm_helper_hpd_irq_event(dev);

	return 0;

err_periph_clk_disable:
	pm_runtime_disable(dev->dev);
	clk_disable_unprepare(dc->hlcdc->periph_clk);

err_destroy_wq:
	destroy_workqueue(dc->wq);

	return ret;
}
static int rk3036_tve_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct resource *res; const struct of_device_id *match; int i; int val = 0; match = of_match_node(rk3036_tve_dt_ids, np); if (!match) return PTR_ERR(match); rk3036_tve = devm_kzalloc(&pdev->dev, sizeof(struct rk3036_tve), GFP_KERNEL); if (!rk3036_tve) { dev_err(&pdev->dev, "rk3036 tv encoder device kmalloc fail!"); return -ENOMEM; } if (of_property_read_u32(np, "test_mode", &val)) rk3036_tve->test_mode = 0; else rk3036_tve->test_mode = val; if (!strcmp(match->compatible, "rockchip,rk3036-tve")) { rk3036_tve->soctype = SOC_RK3036; rk3036_tve->inputformat = INPUT_FORMAT_RGB; } else if (!strcmp(match->compatible, "rockchip,rk312x-tve")) { rk3036_tve->soctype = SOC_RK312X; rk3036_tve->inputformat = INPUT_FORMAT_YUV; } else { dev_err(&pdev->dev, "It is not a valid tv encoder!"); kfree(rk3036_tve); return -ENOMEM; } platform_set_drvdata(pdev, rk3036_tve); rk3036_tve->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); rk3036_tve->reg_phy_base = res->start; rk3036_tve->len = resource_size(res); rk3036_tve->regbase = ioremap(res->start, rk3036_tve->len); if (IS_ERR(rk3036_tve->regbase)) { dev_err(&pdev->dev, "rk3036 tv encoder device map registers failed!"); return PTR_ERR(rk3036_tve->regbase); } INIT_LIST_HEAD(&(rk3036_tve->modelist)); for (i = 0; i < ARRAY_SIZE(rk3036_cvbs_mode); i++) fb_add_videomode(&rk3036_cvbs_mode[i], &(rk3036_tve->modelist)); if (cvbsformat >= 0) { rk3036_tve->mode = (struct fb_videomode *)&rk3036_cvbs_mode[cvbsformat]; rk3036_tve->enable = 1; tve_switch_fb(rk3036_tve->mode, 1); } else { rk3036_tve->mode = (struct fb_videomode *)&rk3036_cvbs_mode[1]; } rk3036_tve->ddev = rk_display_device_register(&display_cvbs, &pdev->dev, NULL); rk_display_device_enable(rk3036_tve->ddev); fb_register_client(&tve_fb_notifier); cvbsformat = -1; dev_info(&pdev->dev, "%s tv encoder probe ok\n", match->compatible); return 0; }
static bool sun4i_drv_node_is_tcon(struct device_node *node) { return !!of_match_node(sun4i_tcon_of_table, node); }
/*
 * brcmstb_gisb_arb_probe - probe one GISB bus arbiter instance.
 *
 * Maps the arbiter registers, wires the timeout and TEA interrupts,
 * reads the master mask and the per-master names from DT, exposes the
 * sysfs attribute group and adds the device to the global arbiter list.
 * On ARM the imprecise external abort fault handler is hooked so bus
 * errors can be decoded.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct brcmstb_gisb_arb_device *gdev;
	const struct of_device_id *of_id;
	struct resource *r;
	int err, timeout_irq, tea_irq;
	unsigned int num_masters, j = 0;
	int i, first, last;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	timeout_irq = platform_get_irq(pdev, 0);
	tea_irq = platform_get_irq(pdev, 1);
	/*
	 * Fail early with the real error code instead of handing a
	 * negative interrupt number to devm_request_irq() below.
	 */
	if (timeout_irq < 0)
		return timeout_irq;
	if (tea_irq < 0)
		return tea_irq;

	gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
	if (!gdev)
		return -ENOMEM;

	mutex_init(&gdev->lock);
	INIT_LIST_HEAD(&gdev->next);

	gdev->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(gdev->base))
		return PTR_ERR(gdev->base);

	of_id = of_match_node(brcmstb_gisb_arb_of_match, dn);
	if (!of_id) {
		pr_err("failed to look up compatible string\n");
		return -EINVAL;
	}
	gdev->gisb_offsets = of_id->data;
	gdev->big_endian = of_device_is_big_endian(dn);

	err = devm_request_irq(&pdev->dev, timeout_irq,
			       brcmstb_gisb_timeout_handler, 0, pdev->name,
			       gdev);
	if (err < 0)
		return err;

	err = devm_request_irq(&pdev->dev, tea_irq,
			       brcmstb_gisb_tea_handler, 0, pdev->name,
			       gdev);
	if (err < 0)
		return err;

	/* If we do not have a valid mask, assume all masters are enabled */
	if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
				 &gdev->valid_mask))
		gdev->valid_mask = 0xffffffff;

	/* Proceed with reading the litteral names if we agree on the
	 * number of masters
	 */
	num_masters = of_property_count_strings(dn,
						"brcm,gisb-arb-master-names");
	if (hweight_long(gdev->valid_mask) == num_masters) {
		first = ffs(gdev->valid_mask) - 1;
		last = fls(gdev->valid_mask) - 1;

		/*
		 * 'last' is the index of the highest set bit and must be
		 * included; the previous "i < last" bound skipped the name
		 * of the last master.
		 */
		for (i = first; i <= last; i++) {
			if (!(gdev->valid_mask & BIT(i)))
				continue;

			of_property_read_string_index(dn,
					"brcm,gisb-arb-master-names", j,
					&gdev->master_names[i]);
			j++;
		}
	}

	err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
	if (err)
		return err;

	platform_set_drvdata(pdev, gdev);

	list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);

#ifdef CONFIG_ARM
	hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
			"imprecise external abort");
#endif

	dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
		 gdev->base, timeout_irq, tea_irq);

	return 0;
}
static bool sun4i_drv_node_is_tcon_top(struct device_node *node) { return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) && !!of_match_node(sun8i_tcon_top_of_table, node); }
/*
 * hisi_gpio_key_probe - probe the HiSilicon GPIO key (volume/back/smart)
 * driver.
 *
 * Allocates the driver state and input device, reads the key GPIOs from
 * DT, requests the GPIOs and their IRQs, configures pinctrl, arms the
 * debounce timers and registers the input device.  Optional back/smart
 * keys and the Huawei DSM statistics hooks are compiled in via #ifdefs.
 *
 * Return: 0 on success, negative errno otherwise; the goto chain at the
 * bottom unwinds everything acquired so far.
 */
static int hisi_gpio_key_probe(struct platform_device* pdev)
{
	struct hisi_gpio_key *gpio_key = NULL;
	struct input_dev *input_dev = NULL;
	enum of_gpio_flags flags;
	int err =0;

	if (NULL == pdev) {
		printk(KERN_ERR "[gpiokey]parameter error!\n");
		return -EINVAL;
	}

	dev_info(&pdev->dev, "hisi gpio key driver probes start!\n");

#ifdef CONFIG_OF
	if (!of_match_node(hs_gpio_key_match, pdev->dev.of_node)) {
		dev_err(&pdev->dev, "dev node is not match. exiting.\n");
		return -ENODEV;
	}
#endif

	/* devm allocation: freed automatically on probe failure/detach. */
	gpio_key = devm_kzalloc(&pdev->dev, sizeof(struct hisi_gpio_key),
				GFP_KERNEL);
	if (!gpio_key) {
		dev_err(&pdev->dev, "Failed to allocate struct hisi_gpio_key!\n");
		return -ENOMEM;
	}

	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(&pdev->dev, "Failed to allocate struct input_dev!\n");
		return -ENOMEM;
	}

	input_dev->name = pdev->name;
	input_dev->id.bustype = BUS_HOST;
	input_dev->dev.parent = &pdev->dev;

	input_set_drvdata(input_dev, gpio_key);

	/* Advertise the key events this device can emit. */
	set_bit(EV_KEY, input_dev->evbit);
	set_bit(EV_SYN, input_dev->evbit);
	set_bit(KEY_VOLUMEUP, input_dev->keybit);
	set_bit(KEY_VOLUMEDOWN, input_dev->keybit);
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_HI6XXX
	set_bit(KEY_BACK, input_dev->keybit);
#endif
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_SMART_KEY
	set_bit(KEY_F24, input_dev->keybit);
#endif

	input_dev->open = hisi_gpio_key_open;
	input_dev->close = hisi_gpio_key_close;
	gpio_key->input_dev = input_dev;

	/* initialize work and wake locks before any IRQ can fire */
	INIT_DELAYED_WORK(&(gpio_key->gpio_keyup_work), hisi_gpio_keyup_work);
	INIT_DELAYED_WORK(&(gpio_key->gpio_keydown_work), hisi_gpio_keydown_work);
	wake_lock_init(&volume_down_key_lock, WAKE_LOCK_SUSPEND, "key_down_wake_lock");
	wake_lock_init(&volume_up_key_lock, WAKE_LOCK_SUSPEND, "key_up_wake_lock");
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_HI6XXX
	INIT_DELAYED_WORK(&(gpio_key->gpio_keyback_work), hisi_gpio_keyback_work);
	wake_lock_init(&back_key_lock, WAKE_LOCK_SUSPEND, "key_back_wake_lock");
#endif
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_SMART_KEY
	INIT_DELAYED_WORK(&(gpio_key->gpio_keysmart_work), hisi_gpio_keysmart_work);
	wake_lock_init(&smart_key_lock, WAKE_LOCK_SUSPEND, "key_smart_wake_lock");
#endif

	/* Invalid GPIOs are only warned about; the key is then skipped. */
	gpio_key->gpio_up = of_get_key_gpio(pdev->dev.of_node, "gpio-keyup,gpio-irq", 0, 0, &flags);
	if (!gpio_is_valid(gpio_key->gpio_up)) {
		printk(KERN_INFO "%s: gpio of volume up is not valid, check DTS\n", __FUNCTION__);
	}
	gpio_key->gpio_down = of_get_key_gpio(pdev->dev.of_node, "gpio-keydown,gpio-irq", 0, 0, &flags);
	if (!gpio_is_valid(gpio_key->gpio_down)) {
		printk(KERN_INFO "%s: gpio of volume down is not valid, check DTS\n", __FUNCTION__);
	}
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_HI6XXX
	gpio_key->gpio_back = of_get_key_gpio(pdev->dev.of_node, "gpio-keyback,gpio-irq", 0, 0, &flags);
	if (!gpio_is_valid(gpio_key->gpio_back)) {
		printk(KERN_INFO "%s: gpio of back key is not valid, check DTS\n", __FUNCTION__);
	}
#endif
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_SMART_KEY
	gpio_key->gpio_smart = of_get_key_gpio(pdev->dev.of_node, "gpio-keysmart,gpio-irq", 0, 0, &flags);
	if (!gpio_is_valid(gpio_key->gpio_smart)) {
		printk(KERN_INFO "%s: gpio of smart key is not valid, check DTS\n", __FUNCTION__);
	}
#endif

	/* Publish the volume key GPIOs through the module-level globals. */
	vol_up_gpio = gpio_key->gpio_up;
	vol_up_active_low = GPIO_KEY_PRESS;
	vol_down_gpio = gpio_key->gpio_down;
	vol_down_active_low = GPIO_KEY_PRESS;

	if (gpio_is_valid(gpio_key->gpio_up)) {
		err = gpio_request((unsigned int)gpio_key->gpio_up, "gpio_up");
		if (err < 0) {
			dev_err(&pdev->dev, "Fail request gpio:%d\n", gpio_key->gpio_up);
			goto err_get_gpio;
		}
		gpio_direction_input((unsigned int)gpio_key->gpio_up);
		gpio_key->volume_up_irq = gpio_to_irq((unsigned int)gpio_key->gpio_up);
		if (gpio_key->volume_up_irq < 0) {
			dev_err(&pdev->dev, "Failed to get gpio key press irq!\n");
			err = gpio_key->volume_up_irq;
			goto err_gpio_to_irq;
		}
	}

	if (gpio_is_valid(gpio_key->gpio_down)) {
		err = gpio_request((unsigned int)gpio_key->gpio_down, "gpio_down");
		if (err) {
			dev_err(&pdev->dev, "Fail request gpio:%d\n", gpio_key->gpio_down);
			goto err_gpio_down_req;
		}
		gpio_direction_input((unsigned int)gpio_key->gpio_down);
		gpio_key->volume_down_irq = gpio_to_irq((unsigned int)gpio_key->gpio_down);
		if (gpio_key->volume_down_irq < 0) {
			dev_err(&pdev->dev, "Failed to get gpio key release irq!\n");
			err = gpio_key->volume_down_irq;
			goto err_gpio_to_irq;
		}
	}
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_HI6XXX
	if (gpio_is_valid(gpio_key->gpio_back)) {
		err = gpio_request((unsigned int)gpio_key->gpio_back, "gpio_back");
		if (err) {
			dev_err(&pdev->dev, "Fail request gpio:%d\n", gpio_key->gpio_back);
			goto err_gpio_back_req;
		}
		gpio_direction_input((unsigned int)gpio_key->gpio_back);
		gpio_key->key_back_irq = gpio_to_irq((unsigned int)gpio_key->gpio_back);
		if (gpio_key->key_back_irq < 0) {
			dev_err(&pdev->dev, "Failed to get gpio key release irq!\n");
			err = gpio_key->key_back_irq;
			goto err_gpio_to_irq;
		}
	}
#endif
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_SMART_KEY
	if (gpio_is_valid(gpio_key->gpio_smart)) {
		err = gpio_request((unsigned int)gpio_key->gpio_smart, "gpio_smart");
		if (err) {
			dev_err(&pdev->dev, "Fail request gpio:%d\n", gpio_key->gpio_smart);
			goto err_gpio_smart_req;
		}
		gpio_direction_input((unsigned int)gpio_key->gpio_smart);
		gpio_key->key_smart_irq = gpio_to_irq((unsigned int)gpio_key->gpio_smart);
		if (gpio_key->key_smart_irq < 0) {
			dev_err(&pdev->dev, "Failed to get gpio key release irq!\n");
			err = gpio_key->key_smart_irq;
			goto err_gpio_to_irq;
		}
	}
#endif

	gpio_key->pctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(gpio_key->pctrl)) {
		dev_err(&pdev->dev, "failed to devm pinctrl get\n");
		err = -EINVAL;
		goto err_pinctrl;
	}
	gpio_key->pins_default = pinctrl_lookup_state(gpio_key->pctrl, PINCTRL_STATE_DEFAULT);
	if (IS_ERR(gpio_key->pins_default)) {
		dev_err(&pdev->dev, "failed to pinctrl lookup state default\n");
		err = -EINVAL;
		goto err_pinctrl_put;
	}
	gpio_key->pins_idle = pinctrl_lookup_state(gpio_key->pctrl, PINCTRL_STATE_IDLE);
	if (IS_ERR(gpio_key->pins_idle)) {
		dev_err(&pdev->dev, "failed to pinctrl lookup state idle\n");
		err = -EINVAL;
		goto err_pinctrl_put;
	}
	err = pinctrl_select_state(gpio_key->pctrl, gpio_key->pins_default);
	if (err < 0) {
		dev_err(&pdev->dev, "set iomux normal error, %d\n", err);
		goto err_pinctrl_put;
	}

#if defined (CONFIG_HUAWEI_DSM)
	/* initialize the statistic variable */
	volume_up_press_count = 0;
	volume_down_press_count = 0;
	volume_up_last_press_time = 0;
	volume_down_last_press_time = 0;
#endif

	/* Debounce timers; handlers report the key state to input core. */
	setup_timer(&(gpio_key->key_up_timer), gpio_keyup_timer, (unsigned long )gpio_key);
	setup_timer(&(gpio_key->key_down_timer), gpio_keydown_timer, (unsigned long )gpio_key);
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_HI6XXX
	setup_timer(&(gpio_key->key_back_timer), gpio_keyback_timer, (unsigned long )gpio_key);
#endif
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_SMART_KEY
	setup_timer(&(gpio_key->key_smart_timer), gpio_keysmart_timer, (unsigned long )gpio_key);
#endif
#if defined (CONFIG_HUAWEI_DSM)
	setup_timer(&dsm_gpio_key_timer, dsm_gpio_key_timer_func, (unsigned long)gpio_key);
#endif

	/*
	 * support falling irq that means volume-up-key is pressed,
	 * and rising irq which means volume-up-key is released.
	 */
	if (gpio_is_valid(gpio_key->gpio_up)) {
		err = request_irq(gpio_key->volume_up_irq, hisi_gpio_key_irq_handler, IRQF_NO_SUSPEND | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, pdev->name, gpio_key);
		if (err) {
			dev_err(&pdev->dev, "Failed to request press interupt handler!\n");
			goto err_up_irq_req;
		}
	}

	/*
	 * support falling irq that means volume-down-key is pressed,
	 * and rising irq which means volume-down-key is released.
	 */
	if (gpio_is_valid(gpio_key->gpio_down)) {
		err = request_irq(gpio_key->volume_down_irq, hisi_gpio_key_irq_handler, IRQF_NO_SUSPEND | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, pdev->name, gpio_key);
		if (err) {
			dev_err(&pdev->dev, "Failed to request release interupt handler!\n");
			goto err_down_irq_req;
		}
	}
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_HI6XXX
	if (gpio_is_valid(gpio_key->gpio_back)) {
		err = request_irq(gpio_key->key_back_irq, hisi_gpio_key_irq_handler, IRQF_NO_SUSPEND | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, pdev->name, gpio_key);
		if (err) {
			dev_err(&pdev->dev, "Failed to request release interupt handler!\n");
			goto err_back_irq_req;
		}
	}
#endif
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_SMART_KEY
	if (gpio_is_valid(gpio_key->gpio_smart)) {
		err = request_irq(gpio_key->key_smart_irq, hisi_gpio_key_irq_handler, IRQF_NO_SUSPEND | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, pdev->name, gpio_key);
		if (err) {
			dev_err(&pdev->dev, "Failed to request release interupt handler!\n");
			goto err_smart_irq_req;
		}
	}
#endif

	err = input_register_device(gpio_key->input_dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register input device!\n");
		goto err_register_dev;
	}

	device_init_wakeup(&pdev->dev, TRUE);
	platform_set_drvdata(pdev, gpio_key);
#if defined (CONFIG_HUAWEI_DSM)
	if (!key_dclient) {
		key_dclient = dsm_register_client(&dsm_key);
	}
	mod_timer(&dsm_gpio_key_timer, jiffies + STATISTIC_INTERVAL * HZ);
#endif
	dev_info(&pdev->dev, "hisi gpio key driver probes successfully!\n");

	return 0;

	/*
	 * NOTE(review): the unwind below frees IRQs and GPIOs
	 * unconditionally, even when the corresponding GPIO was invalid and
	 * never requested; it also relies on fall-through between #ifdef'd
	 * labels (e.g. jumping to err_back_irq_req falls into freeing the
	 * not-yet-requested smart-key IRQ).  Confirm this ordering is
	 * intentional before relying on these paths.
	 */
err_register_dev:
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_HI6XXX
	free_irq(gpio_key->key_back_irq, gpio_key);
err_back_irq_req:
#endif
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_SMART_KEY
	free_irq(gpio_key->key_smart_irq, gpio_key);
err_smart_irq_req:
#endif
	free_irq(gpio_key->volume_down_irq, gpio_key);
err_down_irq_req:
	free_irq(gpio_key->volume_up_irq, gpio_key);
err_up_irq_req:
err_pinctrl_put:
	devm_pinctrl_put(gpio_key->pctrl);
err_pinctrl:
err_gpio_to_irq:
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_HI6XXX
	gpio_free((unsigned int)gpio_key->gpio_back);
err_gpio_back_req:
#endif
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_SMART_KEY
	gpio_free((unsigned int)gpio_key->gpio_smart);
err_gpio_smart_req:
#endif
	gpio_free((unsigned int)gpio_key->gpio_down);
err_gpio_down_req:
	gpio_free((unsigned int)gpio_key->gpio_up);
err_get_gpio:
	input_free_device(input_dev);
	wake_lock_destroy(&volume_down_key_lock);
	wake_lock_destroy(&volume_up_key_lock);
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_HI6XXX
	wake_lock_destroy(&back_key_lock);
#endif
#ifdef CONFIG_HISI_GPIO_KEY_SUPPORT_SMART_KEY
	wake_lock_destroy(&smart_key_lock);
#endif
	/* NOTE(review): pr_info with a KERN_ERR prefix looks unintended. */
	pr_info(KERN_ERR "[gpiokey]K3v3 gpio key probe failed! ret = %d.\n", err);

	return err;
}
static bool sun8i_tcon_top_node_is_tcon_top(struct device_node *node) { return !!of_match_node(sun8i_tcon_top_of_table, node); }
static int k3_vibrator_probe(struct platform_device *pdev) { struct k3_vibrator_data *p_data; struct resource *res; int ret = 0; if (!of_match_node(hsk3_vibrator_match, pdev->dev.of_node)) { dev_err(&pdev->dev, "dev node is not match. exiting.\n"); return -ENODEV; } ret = k3_vibrator_get_vout(pdev); if (ret) { dev_err(&pdev->dev, "failed to get vib vout\n"); return ret; } p_data = kzalloc(sizeof(struct k3_vibrator_data), GFP_KERNEL); if (p_data == NULL) { dev_err(&pdev->dev, "failed to allocate vibrator_device\n"); return -ENOMEM; } /* get base_addres */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "failed to find registers\n"); ret = -ENXIO; goto err; } p_data->k3_vibrator_base = ioremap(res->start, resource_size(res)); if (p_data->k3_vibrator_base == 0) { dev_err(&pdev->dev, "failed to map registers\n"); ret = -ENXIO; goto err; } /* init timer */ hrtimer_init(&p_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); p_data->timer.function = k3_vibrator_timer_func; /* init lock */ mutex_init(&p_data->lock); p_data->mode = SET_MODE; p_data->freq = PERIOD; p_data->power = ISET_POWER; p_data->dev.name = K3_VIBRATOR; p_data->dev.get_time = k3_vibrator_get_time; p_data->dev.enable = k3_vibrator_enable; ret = timed_output_dev_register(&p_data->dev); if (ret < 0) { dev_err(&pdev->dev, "failed to regist dev\n"); goto err_remap; } platform_set_drvdata(pdev, p_data); k3_vibrator_pdata = p_data; /* create a single thread workquene */ done_queue = create_workqueue("done_queue"); if (!done_queue) { dev_err(&pdev->dev, "failed to creat workqueue\n"); ret = -ENOMEM; goto err_regis; } /* DTS2012050403313 end: add by KF74453 for resolved vibrator freq call adc function at 2010-06-13 */ ret = driver_create_file(&k3_vibrator_driver.driver, &driver_attr_state); if (ret) { dev_err(&pdev->dev, "could not create sysfs files\n"); goto err_regis; } printk("%s: successful!\n",__FUNCTION__); return 0; err_regis: 
timed_output_dev_unregister(&p_data->dev); err_remap: iounmap(p_data->k3_vibrator_base); err: kfree(p_data); p_data = NULL; printk("%s: failed!\n",__FUNCTION__); return ret; }
static int xhci_plat_probe(struct platform_device *pdev) { const struct of_device_id *match; const struct hc_driver *driver; struct xhci_hcd *xhci; struct resource *res; struct usb_hcd *hcd; struct clk *clk; int ret; int irq; if (usb_disabled()) return -ENODEV; driver = &xhci_plat_hc_driver; irq = platform_get_irq(pdev, 0); if (irq < 0) return -ENODEV; /* Try to set 64-bit DMA first */ if (WARN_ON(!pdev->dev.dma_mask)) /* Platform did not initialize dma_mask */ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); else ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); /* If seting 64-bit DMA mask fails, fall back to 32-bit DMA mask */ if (ret) { ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) return ret; } hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hcd->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hcd->regs)) { ret = PTR_ERR(hcd->regs); goto put_hcd; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); /* * Not all platforms have a clk so it is not an error if the * clock does not exists. 
*/ clk = devm_clk_get(&pdev->dev, NULL); if (!IS_ERR(clk)) { ret = clk_prepare_enable(clk); if (ret) goto put_hcd; } else if (PTR_ERR(clk) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto put_hcd; } xhci = hcd_to_xhci(hcd); match = of_match_node(usb_xhci_of_match, pdev->dev.of_node); if (match) { const struct xhci_plat_priv *priv_match = match->data; struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); /* Just copy data for now */ if (priv_match) *priv = *priv_match; } device_wakeup_enable(hcd->self.controller); xhci->clk = clk; xhci->main_hcd = hcd; xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev, dev_name(&pdev->dev), hcd); if (!xhci->shared_hcd) { ret = -ENOMEM; goto disable_clk; } if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable")) xhci->quirks |= XHCI_LPM_SUPPORT; if (HCC_MAX_PSA(xhci->hcc_params) >= 4) xhci->shared_hcd->can_do_streams = 1; hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0); if (IS_ERR(hcd->usb_phy)) { ret = PTR_ERR(hcd->usb_phy); if (ret == -EPROBE_DEFER) goto put_usb3_hcd; hcd->usb_phy = NULL; } else { ret = usb_phy_init(hcd->usb_phy); if (ret) goto put_usb3_hcd; } ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret) goto disable_usb_phy; ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); if (ret) goto dealloc_usb2_hcd; return 0; dealloc_usb2_hcd: usb_remove_hcd(hcd); disable_usb_phy: usb_phy_shutdown(hcd->usb_phy); put_usb3_hcd: usb_put_hcd(xhci->shared_hcd); disable_clk: if (!IS_ERR(clk)) clk_disable_unprepare(clk); put_hcd: usb_put_hcd(hcd); return ret; }
static int xylonfb_get_logicvc_configuration(struct xylonfb_data *data) { struct device *dev = &data->pdev->dev; struct device_node *dn = data->device; const struct of_device_id *match; struct videomode vm; int i, ret; XYLONFB_DBG(INFO, "%s", __func__); match = of_match_node(logicvc_of_match, dn); if (!match) { dev_err(dev, "failed match logicvc\n"); return -ENODEV; } ret = of_address_to_resource(dn, 0, &data->resource_mem); if (ret) { dev_err(dev, "failed get mem resource\n"); return ret; } data->irq = of_irq_to_resource(dn, 0, &data->resource_irq); if (data->irq == 0) { dev_err(dev, "failed get irq resource\n"); return ret; } ret = xylon_parse_hw_info(dn, data); if (ret) return ret; for (i = 0; i < LOGICVC_MAX_LAYERS; i++) { ret = xylonfb_parse_layer_info(dn, data, i); if (ret < 0) return ret; if (ret == 0) break; } if (data->flags & XYLONFB_FLAGS_BACKGROUND_LAYER && data->layers == LOGICVC_MAX_LAYERS) { data->flags &= ~XYLONFB_FLAGS_BACKGROUND_LAYER; data->layers--; if (data->console_layer == data->layers) data->console_layer--; dev_warn(dev, "invalid last layer configuration\n"); } memset(&vm, 0, sizeof(vm)); if (!(data->flags & XYLONFB_FLAGS_EDID_VMODE) && (data->vm.name[0] == 0)) { ret = of_get_videomode(dn, &vm, OF_USE_NATIVE_MODE); if (!ret) { fb_videomode_from_videomode(&vm, &data->vm.vmode); sprintf(data->vm.name, "%dx%d", data->vm.vmode.xres, data->vm.vmode.yres); data->flags |= XYLONFB_FLAGS_VMODE_CUSTOM; } } xylonfb_init_ctrl(dn, vm.flags, &data->vm.ctrl); return 0; }
/*
 * exynos_audss_clk_probe - register the Exynos Audio Subsystem (AudSS)
 * clocks.
 *
 * Builds the mout_audss/mout_i2s muxes, the SRP/bus/I2S dividers and the
 * leaf gates from the AudSS register block and exposes them through a
 * onecell OF clock provider.  Registration errors are collected in a
 * single pass at the end; on failure every clock registered so far is
 * unregistered again.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int exynos_audss_clk_probe(struct platform_device *pdev)
{
	int i, ret = 0;
	struct resource *res;
	/* Default parent names, overridden below when DT supplies clocks. */
	const char *mout_audss_p[] = {"fin_pll", "fout_epll"};
	const char *mout_i2s_p[] = {"mout_audss", "cdclk0", "sclk_audio0"};
	const char *sclk_pcm_p = "sclk_pcm0";
	struct clk *pll_ref, *pll_in, *cdclk, *sclk_audio, *sclk_pcm_in;
	const struct of_device_id *match;
	enum exynos_audss_clk_type variant;

	match = of_match_node(exynos_audss_clk_of_match, pdev->dev.of_node);
	if (!match)
		return -EINVAL;
	/* The SoC variant is encoded directly in the match data pointer. */
	variant = (enum exynos_audss_clk_type)match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg_base)) {
		dev_err(&pdev->dev, "failed to map audss registers\n");
		return PTR_ERR(reg_base);
	}

	clk_table = devm_kzalloc(&pdev->dev,
				sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
				GFP_KERNEL);
	if (!clk_table)
		return -ENOMEM;

	clk_data.clks = clk_table;
	/* Only Exynos5420 has the extra ADMA gate at the end of the table. */
	if (variant == TYPE_EXYNOS5420)
		clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS;
	else
		clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS - 1;

	pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
	pll_in = devm_clk_get(&pdev->dev, "pll_in");
	if (!IS_ERR(pll_ref))
		mout_audss_p[0] = __clk_get_name(pll_ref);
	if (!IS_ERR(pll_in))
		mout_audss_p[1] = __clk_get_name(pll_in);
	clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
			mout_audss_p, ARRAY_SIZE(mout_audss_p),
			CLK_SET_RATE_NO_REPARENT,
			reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);

	cdclk = devm_clk_get(&pdev->dev, "cdclk");
	sclk_audio = devm_clk_get(&pdev->dev, "sclk_audio");
	if (!IS_ERR(cdclk))
		mout_i2s_p[1] = __clk_get_name(cdclk);
	if (!IS_ERR(sclk_audio))
		mout_i2s_p[2] = __clk_get_name(sclk_audio);
	clk_table[EXYNOS_MOUT_I2S] = clk_register_mux(NULL, "mout_i2s",
			mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
			CLK_SET_RATE_NO_REPARENT,
			reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);

	clk_table[EXYNOS_DOUT_SRP] = clk_register_divider(NULL, "dout_srp",
			"mout_audss", 0, reg_base + ASS_CLK_DIV, 0, 4,
			0, &lock);

	clk_table[EXYNOS_DOUT_AUD_BUS] = clk_register_divider(NULL,
			"dout_aud_bus", "dout_srp", 0,
			reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);

	clk_table[EXYNOS_DOUT_I2S] = clk_register_divider(NULL, "dout_i2s",
			"mout_i2s", 0, reg_base + ASS_CLK_DIV, 8, 4, 0,
			&lock);

	clk_table[EXYNOS_SRP_CLK] = clk_register_gate(NULL, "srp_clk",
			"dout_srp", CLK_SET_RATE_PARENT,
			reg_base + ASS_CLK_GATE, 0, 0, &lock);

	clk_table[EXYNOS_I2S_BUS] = clk_register_gate(NULL, "i2s_bus",
			"dout_aud_bus", CLK_SET_RATE_PARENT,
			reg_base + ASS_CLK_GATE, 2, 0, &lock);

	clk_table[EXYNOS_SCLK_I2S] = clk_register_gate(NULL, "sclk_i2s",
			"dout_i2s", CLK_SET_RATE_PARENT,
			reg_base + ASS_CLK_GATE, 3, 0, &lock);

	clk_table[EXYNOS_PCM_BUS] = clk_register_gate(NULL, "pcm_bus",
			"sclk_pcm", CLK_SET_RATE_PARENT,
			reg_base + ASS_CLK_GATE, 4, 0, &lock);

	sclk_pcm_in = devm_clk_get(&pdev->dev, "sclk_pcm_in");
	if (!IS_ERR(sclk_pcm_in))
		sclk_pcm_p = __clk_get_name(sclk_pcm_in);
	clk_table[EXYNOS_SCLK_PCM] = clk_register_gate(NULL, "sclk_pcm",
			sclk_pcm_p, CLK_SET_RATE_PARENT,
			reg_base + ASS_CLK_GATE, 5, 0, &lock);

	if (variant == TYPE_EXYNOS5420) {
		clk_table[EXYNOS_ADMA] = clk_register_gate(NULL, "adma",
				"dout_srp", CLK_SET_RATE_PARENT,
				reg_base + ASS_CLK_GATE, 9, 0, &lock);
	}

	/* Single-pass error check over everything registered above. */
	for (i = 0; i < clk_data.clk_num; i++) {
		if (IS_ERR(clk_table[i])) {
			dev_err(&pdev->dev, "failed to register clock %d\n",
				i);
			ret = PTR_ERR(clk_table[i]);
			goto unregister;
		}
	}

	ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
					&clk_data);
	if (ret) {
		dev_err(&pdev->dev, "failed to add clock provider\n");
		goto unregister;
	}

#ifdef CONFIG_PM_SLEEP
	register_syscore_ops(&exynos_audss_clk_syscore_ops);
#endif

	dev_info(&pdev->dev, "setup completed\n");

	return 0;

unregister:
	/* Unregister whatever was registered; failed entries are skipped. */
	for (i = 0; i < clk_data.clk_num; i++) {
		if (!IS_ERR(clk_table[i]))
			clk_unregister(clk_table[i]);
	}

	return ret;
}
/*
 * st_of_clkgena_divmux_setup - OF init-time setup for an ST ClockGenA
 * divider/mux block.
 *
 * Matches the block description, maps its registers, reads the parent
 * clock names, registers one genamux clock per entry of the
 * "clock-output-names" property (empty names mark unused outputs) and
 * publishes the result as a onecell OF clock provider.  Errors are
 * reported by silently cleaning up and returning, as is usual for
 * CLK_OF_DECLARE-style setup callbacks.
 */
static void __init st_of_clkgena_divmux_setup(struct device_node *np)
{
	const struct of_device_id *match;
	const struct clkgena_divmux_data *data;
	struct clk_onecell_data *clk_data;
	void __iomem *reg;
	const char **parents;
	int num_parents = 0, i;

	match = of_match_node(clkgena_divmux_of_match, np);
	if (WARN_ON(!match))
		return;

	data = match->data;

	reg = clkgen_get_register_base(np);
	if (!reg)
		return;

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents))
		goto err_parents;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err_alloc;

	clk_data->clk_num = data->num_outputs;
	clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
				 GFP_KERNEL);
	if (!clk_data->clks)
		goto err_alloc_clks;

	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk *clk;
		const char *clk_name;

		/* Stop at the end of the "clock-output-names" list. */
		if (of_property_read_string_index(np, "clock-output-names",
						  i, &clk_name))
			break;

		/*
		 * If we read an empty clock name then the output is unused
		 */
		if (*clk_name == '\0')
			continue;

		clk = clk_register_genamux(clk_name, parents, num_parents,
					   reg, data, i);
		if (IS_ERR(clk))
			goto err;

		clk_data->clks[i] = clk;
	}

	/* parents array is only needed during registration */
	kfree(parents);

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
	return;
err:
	/*
	 * NOTE(review): clocks registered in earlier loop iterations are
	 * not unregistered here — confirm this partial-registration
	 * behavior is acceptable for this init path.
	 */
	kfree(clk_data->clks);
err_alloc_clks:
	kfree(clk_data);
err_alloc:
	kfree(parents);
err_parents:
	iounmap(reg);
}