static int ehci_oxnas_drv_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; struct usb_hcd *hcd; struct ehci_hcd *ehci; struct resource res; struct oxnas_hcd *oxnas; int irq, err; struct reset_control *rstc; if (usb_disabled()) return -ENODEV; if (!ofdev->dev.dma_mask) ofdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask; if (!ofdev->dev.coherent_dma_mask) ofdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); hcd = usb_create_hcd(&oxnas_hc_driver, &ofdev->dev, dev_name(&ofdev->dev)); if (!hcd) return -ENOMEM; err = of_address_to_resource(np, 0, &res); if (err) goto err_res; hcd->rsrc_start = res.start; hcd->rsrc_len = resource_size(&res); hcd->regs = devm_ioremap_resource(&ofdev->dev, &res); if (IS_ERR(hcd->regs)) { dev_err(&ofdev->dev, "devm_ioremap_resource failed\n"); err = PTR_ERR(hcd->regs); goto err_ioremap; } oxnas = (struct oxnas_hcd *)hcd_to_ehci(hcd)->priv; oxnas->use_pllb = of_property_read_bool(np, "plxtech,ehci_use_pllb"); oxnas->use_phya = of_property_read_bool(np, "plxtech,ehci_use_phya"); oxnas->clk = of_clk_get_by_name(np, "usb"); if (IS_ERR(oxnas->clk)) { err = PTR_ERR(oxnas->clk); goto err_clk; } if (oxnas->use_pllb) { oxnas->refsrc = of_clk_get_by_name(np, "refsrc"); if (IS_ERR(oxnas->refsrc)) { err = PTR_ERR(oxnas->refsrc); goto err_refsrc; } oxnas->phyref = of_clk_get_by_name(np, "phyref"); if (IS_ERR(oxnas->phyref)) { err = PTR_ERR(oxnas->phyref); goto err_phyref; } } else { oxnas->refsrc = NULL; oxnas->phyref = NULL; } rstc = devm_reset_control_get(&ofdev->dev, "host"); if (IS_ERR(rstc)) { err = PTR_ERR(rstc); goto err_rst; } oxnas->rst_host = rstc; rstc = devm_reset_control_get(&ofdev->dev, "phya"); if (IS_ERR(rstc)) { err = PTR_ERR(rstc); goto err_rst; } oxnas->rst_phya = rstc; rstc = devm_reset_control_get(&ofdev->dev, "phyb"); if (IS_ERR(rstc)) { err = PTR_ERR(rstc); goto err_rst; } oxnas->rst_phyb = rstc; irq = irq_of_parse_and_map(np, 0); if (!irq) { dev_err(&ofdev->dev, "irq_of_parse_and_map failed\n"); err = -EBUSY; goto err_irq; } hcd->has_tt = 1; ehci = hcd_to_ehci(hcd); ehci->caps = hcd->regs; start_oxnas_usb_ehci(oxnas); err = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED); if (err) goto err_hcd; return 0; err_hcd: stop_oxnas_usb_ehci(oxnas); err_irq: err_rst: if (oxnas->phyref) clk_put(oxnas->phyref); err_phyref: if (oxnas->refsrc) clk_put(oxnas->refsrc); err_refsrc: clk_put(oxnas->clk); err_clk: err_ioremap: err_res: usb_put_hcd(hcd); return err; }
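/*
 * Editor's sketch (not from the driver): the unwind pattern the probe above
 * relies on -- acquire resources in order, release them in exact reverse
 * order through a cascading goto ladder. The foo_* names are hypothetical;
 * only the shape matters. Note how the copy-paste bug fixed above (testing
 * refsrc after acquiring phyref) breaks this pattern: each IS_ERR() check
 * must test the resource it just acquired.
 */
#include <linux/clk.h>
#include <linux/err.h>

static int foo_get_clocks(struct device_node *np, struct clk **a, struct clk **b)
{
	int err;

	*a = of_clk_get_by_name(np, "a");
	if (IS_ERR(*a))
		return PTR_ERR(*a);

	*b = of_clk_get_by_name(np, "b");
	if (IS_ERR(*b)) {	/* test *b, not *a */
		err = PTR_ERR(*b);
		goto err_put_a;	/* undo only what succeeded so far */
	}

	return 0;

err_put_a:
	clk_put(*a);
	return err;
}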
static int stm32_spdifrx_probe(struct platform_device *pdev) { struct stm32_spdifrx_data *spdifrx; struct reset_control *rst; const struct snd_dmaengine_pcm_config *pcm_config = NULL; int ret; spdifrx = devm_kzalloc(&pdev->dev, sizeof(*spdifrx), GFP_KERNEL); if (!spdifrx) return -ENOMEM; spdifrx->pdev = pdev; init_completion(&spdifrx->cs_completion); spin_lock_init(&spdifrx->lock); platform_set_drvdata(pdev, spdifrx); ret = stm_spdifrx_parse_of(pdev, spdifrx); if (ret) return ret; spdifrx->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "kclk", spdifrx->base, spdifrx->regmap_conf); if (IS_ERR(spdifrx->regmap)) { dev_err(&pdev->dev, "Regmap init failed\n"); return PTR_ERR(spdifrx->regmap); } ret = devm_request_irq(&pdev->dev, spdifrx->irq, stm32_spdifrx_isr, 0, dev_name(&pdev->dev), spdifrx); if (ret) { dev_err(&pdev->dev, "IRQ request returned %d\n", ret); return ret; } rst = devm_reset_control_get(&pdev->dev, NULL); if (!IS_ERR(rst)) { reset_control_assert(rst); udelay(2); reset_control_deassert(rst); } ret = devm_snd_soc_register_component(&pdev->dev, &stm32_spdifrx_component, stm32_spdifrx_dai, ARRAY_SIZE(stm32_spdifrx_dai)); if (ret) return ret; ret = stm32_spdifrx_dma_ctrl_register(&pdev->dev, spdifrx); if (ret) goto error; pcm_config = &stm32_spdifrx_pcm_config; ret = devm_snd_dmaengine_pcm_register(&pdev->dev, pcm_config, 0); if (ret) { dev_err(&pdev->dev, "PCM DMA register returned %d\n", ret); goto error; } return 0; error: if (spdifrx->ctrl_chan) dma_release_channel(spdifrx->ctrl_chan); if (spdifrx->dmab) snd_dma_free_pages(spdifrx->dmab); return ret; }
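/*
 * Editor's sketch: the warm-reset pulse used above (assert, short delay,
 * deassert), factored into a helper. The helper name is hypothetical and the
 * 2us hold time is the one from the probe. The reset is treated as optional
 * there -- failure to get it is silently skipped -- so on kernels that have
 * devm_reset_control_get_optional(), a NULL-tolerant control would achieve
 * the same without the IS_ERR() guard.
 */
#include <linux/delay.h>
#include <linux/reset.h>

static void foo_pulse_reset(struct reset_control *rst)
{
	reset_control_assert(rst);
	udelay(2);		/* hold time, as in the probe above */
	reset_control_deassert(rst);
}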
static int sun6i_spi_probe(struct platform_device *pdev) { struct spi_master *master; struct sun6i_spi *sspi; struct resource *res; int ret = 0, irq; master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi)); if (!master) { dev_err(&pdev->dev, "Unable to allocate SPI Master\n"); return -ENOMEM; } platform_set_drvdata(pdev, master); sspi = spi_master_get_devdata(master); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sspi->base_addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(sspi->base_addr)) { ret = PTR_ERR(sspi->base_addr); goto err_free_master; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "No spi IRQ specified\n"); ret = -ENXIO; goto err_free_master; } ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler, 0, "sun6i-spi", sspi); if (ret) { dev_err(&pdev->dev, "Cannot request IRQ\n"); goto err_free_master; } sspi->master = master; master->set_cs = sun6i_spi_set_cs; master->transfer_one = sun6i_spi_transfer_one; master->num_chipselect = 4; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; master->bits_per_word_mask = SPI_BPW_MASK(8); master->dev.of_node = pdev->dev.of_node; master->auto_runtime_pm = true; sspi->hclk = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(sspi->hclk)) { dev_err(&pdev->dev, "Unable to acquire AHB clock\n"); ret = PTR_ERR(sspi->hclk); goto err_free_master; } sspi->mclk = devm_clk_get(&pdev->dev, "mod"); if (IS_ERR(sspi->mclk)) { dev_err(&pdev->dev, "Unable to acquire module clock\n"); ret = PTR_ERR(sspi->mclk); goto err_free_master; } init_completion(&sspi->done); sspi->rstc = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(sspi->rstc)) { dev_err(&pdev->dev, "Couldn't get reset controller\n"); ret = PTR_ERR(sspi->rstc); goto err_free_master; } /* * This wake-up/shutdown pattern is to be able to have the * device woken up, even if runtime_pm is disabled */ ret = sun6i_spi_runtime_resume(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Couldn't resume the device\n"); goto err_free_master; } pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); pm_runtime_idle(&pdev->dev); ret = devm_spi_register_master(&pdev->dev, master); if (ret) { dev_err(&pdev->dev, "cannot register SPI master\n"); goto err_pm_disable; } return 0; err_pm_disable: pm_runtime_disable(&pdev->dev); sun6i_spi_runtime_suspend(&pdev->dev); err_free_master: spi_master_put(master); return ret; }
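/*
 * Editor's sketch: the wake-up pattern described by the comment in the probe
 * above, as a template. The driver calls its own runtime_resume handler
 * directly so the hardware is powered even when runtime PM is disabled, then
 * tells the PM core the device is already active before enabling runtime PM.
 * The foo_* names are hypothetical; foo_runtime_resume() is a stub.
 */
#include <linux/pm_runtime.h>

static int foo_runtime_resume(struct device *dev)
{
	/* stub: a real handler would enable clocks and deassert resets */
	return 0;
}

static int foo_probe_power_up(struct device *dev)
{
	int ret;

	ret = foo_runtime_resume(dev);	/* manual power-up, works without PM */
	if (ret)
		return ret;

	pm_runtime_set_active(dev);	/* sync the PM core with reality */
	pm_runtime_enable(dev);
	pm_runtime_idle(dev);		/* allow suspend once unused */
	return 0;
}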
static int tegra_ahci_probe(struct platform_device *pdev) { struct ahci_host_priv *hpriv; struct tegra_ahci_priv *tegra; struct resource *res; int ret; hpriv = ahci_platform_get_resources(pdev); if (IS_ERR(hpriv)) return PTR_ERR(hpriv); tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL); if (!tegra) return -ENOMEM; hpriv->plat_data = tegra; tegra->pdev = pdev; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); tegra->sata_regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(tegra->sata_regs)) return PTR_ERR(tegra->sata_regs); tegra->sata_rst = devm_reset_control_get(&pdev->dev, "sata"); if (IS_ERR(tegra->sata_rst)) { dev_err(&pdev->dev, "Failed to get sata reset\n"); return PTR_ERR(tegra->sata_rst); } tegra->sata_oob_rst = devm_reset_control_get(&pdev->dev, "sata-oob"); if (IS_ERR(tegra->sata_oob_rst)) { dev_err(&pdev->dev, "Failed to get sata-oob reset\n"); return PTR_ERR(tegra->sata_oob_rst); } tegra->sata_cold_rst = devm_reset_control_get(&pdev->dev, "sata-cold"); if (IS_ERR(tegra->sata_cold_rst)) { dev_err(&pdev->dev, "Failed to get sata-cold reset\n"); return PTR_ERR(tegra->sata_cold_rst); } tegra->sata_clk = devm_clk_get(&pdev->dev, "sata"); if (IS_ERR(tegra->sata_clk)) { dev_err(&pdev->dev, "Failed to get sata clock\n"); return PTR_ERR(tegra->sata_clk); } tegra->supplies[0].supply = "avdd"; tegra->supplies[1].supply = "hvdd"; tegra->supplies[2].supply = "vddio"; tegra->supplies[3].supply = "target-5v"; tegra->supplies[4].supply = "target-12v"; ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(tegra->supplies), tegra->supplies); if (ret) { dev_err(&pdev->dev, "Failed to get regulators\n"); return ret; } ret = tegra_ahci_controller_init(hpriv); if (ret) return ret; ret = ahci_platform_init_host(pdev, hpriv, &ahci_tegra_port_info); if (ret) goto deinit_controller; return 0; deinit_controller: tegra_ahci_controller_deinit(hpriv); return ret; }
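/*
 * Editor's sketch: the probe above fills a regulator_bulk_data array and
 * fetches all supplies with one devm_regulator_bulk_get() call; powering the
 * rails on and off then also takes one call each. Supply names are the ones
 * from the probe; foo_* and NUM_SUPPLIES are illustrative.
 */
#include <linux/regulator/consumer.h>

#define NUM_SUPPLIES 5

static int foo_power_on(struct regulator_bulk_data *supplies)
{
	/* enables all five supplies in array order, unwinds on failure */
	return regulator_bulk_enable(NUM_SUPPLIES, supplies);
}

static void foo_power_off(struct regulator_bulk_data *supplies)
{
	regulator_bulk_disable(NUM_SUPPLIES, supplies);
}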
static int tegra_uart_probe(struct platform_device *pdev) { struct tegra_uart_port *tup; struct uart_port *u; struct resource *resource; int ret; const struct tegra_uart_chip_data *cdata; const struct of_device_id *match; match = of_match_device(tegra_uart_of_match, &pdev->dev); if (!match) { dev_err(&pdev->dev, "Error: No device match found\n"); return -ENODEV; } cdata = match->data; tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL); if (!tup) { dev_err(&pdev->dev, "Failed to allocate memory for tup\n"); return -ENOMEM; } ret = tegra_uart_parse_dt(pdev, tup); if (ret < 0) return ret; u = &tup->uport; u->dev = &pdev->dev; u->ops = &tegra_uart_ops; u->type = PORT_TEGRA; u->fifosize = 32; tup->cdata = cdata; platform_set_drvdata(pdev, tup); resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!resource) { dev_err(&pdev->dev, "No IO memory resource\n"); return -ENODEV; } u->mapbase = resource->start; u->membase = devm_ioremap_resource(&pdev->dev, resource); if (IS_ERR(u->membase)) return PTR_ERR(u->membase); tup->uart_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(tup->uart_clk)) { dev_err(&pdev->dev, "Couldn't get the clock\n"); return PTR_ERR(tup->uart_clk); } tup->rst = devm_reset_control_get(&pdev->dev, "serial"); if (IS_ERR(tup->rst)) { dev_err(&pdev->dev, "Couldn't get the reset\n"); return PTR_ERR(tup->rst); } u->iotype = UPIO_MEM32; u->irq = platform_get_irq(pdev, 0); u->regshift = 2; ret = uart_add_one_port(&tegra_uart_driver, u); if (ret < 0) { dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret); return ret; } return ret; }
static int __devinit gpu_probe(struct platform_device *pdev) { int ret = -ENODEV; struct resource *res; #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) struct contiguous_mem_pool *pool; struct reset_control *rstc; #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) struct device_node *dn = pdev->dev.of_node; const u32 *prop; #else struct viv_gpu_platform_data *pdata; #endif gcmkHEADER(); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phys_baseaddr"); if (res) baseAddress = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d"); if (res) irqLine = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d"); if (res) { registerMemBase = res->start; registerMemSize = resource_size(res); } res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d"); if (res) irqLine2D = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d"); if (res) { registerMemBase2D = res->start; registerMemSize2D = resource_size(res); } res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg"); if (res) irqLineVG = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg"); if (res) { registerMemBaseVG = res->start; registerMemSizeVG = resource_size(res); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) pool = devm_kzalloc(&pdev->dev, sizeof(*pool), GFP_KERNEL); if (!pool) return -ENOMEM; pool->size = contiguousSize; init_dma_attrs(&pool->attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &pool->attrs); pool->virt = dma_alloc_attrs(&pdev->dev, pool->size, &pool->phys, GFP_KERNEL, &pool->attrs); if (!pool->virt) { dev_err(&pdev->dev, "Failed to allocate contiguous memory\n"); return -ENOMEM; } contiguousBase = pool->phys; dev_set_drvdata(&pdev->dev, pool); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) prop = of_get_property(dn, "contiguousbase", NULL); if (prop) contiguousBase = *prop; of_property_read_u32(dn, "contiguoussize", (u32 *)&contiguousSize); #else pdata = pdev->dev.platform_data; if (pdata) { contiguousBase = pdata->reserved_mem_base; contiguousSize = pdata->reserved_mem_size; } #endif if (contiguousSize == 0) gckOS_Print("Warning: No contiguous memory is reserved for the GPU!\n"); ret = drv_init(&pdev->dev); if (!ret) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) rstc = devm_reset_control_get(&pdev->dev, "gpu3d"); galDevice->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc; rstc = devm_reset_control_get(&pdev->dev, "gpu2d"); galDevice->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc; rstc = devm_reset_control_get(&pdev->dev, "gpuvg"); galDevice->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc; #endif platform_set_drvdata(pdev, galDevice); #if gcdENABLE_FSCALE_VAL_ADJUST if (galDevice->kernels[gcvCORE_MAJOR]) REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier); #endif gcmkFOOTER_NO(); return ret; } #if gcdENABLE_FSCALE_VAL_ADJUST UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier); #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) dma_free_attrs(&pdev->dev, pool->size, pool->virt, pool->phys, &pool->attrs); #endif gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret); return ret; }
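/*
 * Editor's sketch: the init_dma_attrs()/dma_set_attr() pair above is the
 * pre-4.8 DMA attributes API. On newer kernels the attribute set is a plain
 * unsigned long bitmask, so the same write-combined contiguous allocation
 * collapses to a single call. The helper name is hypothetical.
 */
#include <linux/dma-mapping.h>

static void *foo_alloc_contiguous(struct device *dev, size_t size,
				  dma_addr_t *phys)
{
	/* DMA_ATTR_WRITE_COMBINE: uncached, write-combined CPU mapping */
	return dma_alloc_attrs(dev, size, phys, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE);
}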
static int sun8i_tcon_top_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); struct clk_hw_onecell_data *clk_data; struct sun8i_tcon_top *tcon_top; const struct sun8i_tcon_top_quirks *quirks; struct resource *res; void __iomem *regs; int ret, i; quirks = of_device_get_match_data(&pdev->dev); tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL); if (!tcon_top) return -ENOMEM; clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, CLK_NUM), GFP_KERNEL); if (!clk_data) return -ENOMEM; tcon_top->clk_data = clk_data; spin_lock_init(&tcon_top->reg_lock); tcon_top->rst = devm_reset_control_get(dev, NULL); if (IS_ERR(tcon_top->rst)) { dev_err(dev, "Couldn't get our reset line\n"); return PTR_ERR(tcon_top->rst); } tcon_top->bus = devm_clk_get(dev, "bus"); if (IS_ERR(tcon_top->bus)) { dev_err(dev, "Couldn't get the bus clock\n"); return PTR_ERR(tcon_top->bus); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(dev, res); tcon_top->regs = regs; if (IS_ERR(regs)) return PTR_ERR(regs); ret = reset_control_deassert(tcon_top->rst); if (ret) { dev_err(dev, "Could not deassert ctrl reset control\n"); return ret; } ret = clk_prepare_enable(tcon_top->bus); if (ret) { dev_err(dev, "Could not enable bus clock\n"); goto err_assert_reset; } /* * At least on H6, some registers have some bits set by default * which may cause issues. Clear them here. */ writel(0, regs + TCON_TOP_PORT_SEL_REG); writel(0, regs + TCON_TOP_GATE_SRC_REG); /* * TCON TOP has two muxes, which select parent clock for each TCON TV * channel clock. Parent could be either TCON TV or TVE clock. For now * we leave this fixed to TCON TV, since TVE driver for R40 is not yet * implemented. Once it is, graph needs to be traversed to determine * if TVE is active on each TCON TV. If it is, mux should be switched * to TVE clock parent. */ clk_data->hws[CLK_TCON_TOP_TV0] = sun8i_tcon_top_register_gate(dev, "tcon-tv0", regs, &tcon_top->reg_lock, TCON_TOP_TCON_TV0_GATE, 0); if (quirks->has_tcon_tv1) clk_data->hws[CLK_TCON_TOP_TV1] = sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs, &tcon_top->reg_lock, TCON_TOP_TCON_TV1_GATE, 1); if (quirks->has_dsi) clk_data->hws[CLK_TCON_TOP_DSI] = sun8i_tcon_top_register_gate(dev, "dsi", regs, &tcon_top->reg_lock, TCON_TOP_TCON_DSI_GATE, 2); for (i = 0; i < CLK_NUM; i++) if (IS_ERR(clk_data->hws[i])) { ret = PTR_ERR(clk_data->hws[i]); goto err_unregister_gates; } clk_data->num = CLK_NUM; ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, clk_data); if (ret) goto err_unregister_gates; dev_set_drvdata(dev, tcon_top); return 0; err_unregister_gates: for (i = 0; i < CLK_NUM; i++) if (!IS_ERR_OR_NULL(clk_data->hws[i])) clk_hw_unregister_gate(clk_data->hws[i]); clk_disable_unprepare(tcon_top->bus); err_assert_reset: reset_control_assert(tcon_top->rst); return ret; }
static int rk_crypto_probe(struct platform_device *pdev) { struct resource *res; struct device *dev = &pdev->dev; struct rk_crypto_info *crypto_info; int err = 0; crypto_info = devm_kzalloc(&pdev->dev, sizeof(*crypto_info), GFP_KERNEL); if (!crypto_info) { err = -ENOMEM; goto err_crypto; } crypto_info->dev = dev; crypto_info->rst = devm_reset_control_get(dev, "crypto-rst"); if (IS_ERR(crypto_info->rst)) { err = PTR_ERR(crypto_info->rst); goto err_crypto; } reset_control_assert(crypto_info->rst); usleep_range(10, 20); reset_control_deassert(crypto_info->rst); err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info); if (err) goto err_crypto; spin_lock_init(&crypto_info->lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); crypto_info->reg = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(crypto_info->reg)) { err = PTR_ERR(crypto_info->reg); goto err_crypto; } crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk"); if (IS_ERR(crypto_info->aclk)) { err = PTR_ERR(crypto_info->aclk); goto err_crypto; } crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk"); if (IS_ERR(crypto_info->hclk)) { err = PTR_ERR(crypto_info->hclk); goto err_crypto; } crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk"); if (IS_ERR(crypto_info->sclk)) { err = PTR_ERR(crypto_info->sclk); goto err_crypto; } crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk"); if (IS_ERR(crypto_info->dmaclk)) { err = PTR_ERR(crypto_info->dmaclk); goto err_crypto; } crypto_info->irq = platform_get_irq(pdev, 0); if (crypto_info->irq < 0) { dev_warn(crypto_info->dev, "control Interrupt is not available.\n"); err = crypto_info->irq; goto err_crypto; } err = devm_request_irq(&pdev->dev, crypto_info->irq, rk_crypto_irq_handle, IRQF_SHARED, "rk-crypto", pdev); if (err) { dev_err(crypto_info->dev, "irq request failed.\n"); goto err_crypto; } platform_set_drvdata(pdev, crypto_info); tasklet_init(&crypto_info->queue_task, rk_crypto_queue_task_cb, (unsigned long)crypto_info); tasklet_init(&crypto_info->done_task, rk_crypto_done_task_cb, (unsigned long)crypto_info); crypto_init_queue(&crypto_info->queue, 50); crypto_info->enable_clk = rk_crypto_enable_clk; crypto_info->disable_clk = rk_crypto_disable_clk; crypto_info->load_data = rk_load_data; crypto_info->unload_data = rk_unload_data; crypto_info->enqueue = rk_crypto_enqueue; crypto_info->busy = false; err = rk_crypto_register(crypto_info); if (err) { dev_err(dev, "failed to register algorithms\n"); goto err_register_alg; } dev_info(dev, "Crypto Accelerator successfully registered\n"); return 0; err_register_alg: tasklet_kill(&crypto_info->queue_task); tasklet_kill(&crypto_info->done_task); err_crypto: return err; }
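/*
 * Editor's sketch: on kernels that have the clk bulk API (v4.13+), the four
 * per-name devm_clk_get() calls above can collapse into one request. The
 * clock names match the probe; foo_clks and the helper are illustrative.
 * clk_bulk_prepare_enable()/clk_bulk_disable_unprepare() then handle the
 * whole set at once.
 */
#include <linux/clk.h>
#include <linux/kernel.h>

static struct clk_bulk_data foo_clks[] = {
	{ .id = "aclk" },
	{ .id = "hclk" },
	{ .id = "sclk" },
	{ .id = "apb_pclk" },
};

static int foo_get_clks(struct device *dev)
{
	/* fails if any named clock is missing; devm frees them on detach */
	return devm_clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
}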
static int aml_dai_spdif_probe(struct platform_device *pdev) { int i, ret; struct reset_control *spdif_reset; struct aml_spdif *spdif_priv; pr_info("aml_spdif_probe\n"); /* enable spdif power gate first */ for (i = 0; i < ARRAY_SIZE(gate_names); i++) { spdif_reset = devm_reset_control_get(&pdev->dev, gate_names[i]); if (IS_ERR(spdif_reset)) { dev_err(&pdev->dev, "Can't get aml audio gate\n"); return PTR_ERR(spdif_reset); } reset_control_deassert(spdif_reset); } spdif_priv = devm_kzalloc(&pdev->dev, sizeof(struct aml_spdif), GFP_KERNEL); if (!spdif_priv) { dev_err(&pdev->dev, "Can't allocate spdif_priv\n"); ret = -ENOMEM; goto err; } dev_set_drvdata(&pdev->dev, spdif_priv); spdif_p = spdif_priv; spdif_priv->clk_mpl1 = devm_clk_get(&pdev->dev, "mpll1"); if (IS_ERR(spdif_priv->clk_mpl1)) { dev_err(&pdev->dev, "Can't retrieve mpll1 clock\n"); ret = PTR_ERR(spdif_priv->clk_mpl1); goto err; } spdif_priv->clk_i958 = devm_clk_get(&pdev->dev, "i958"); if (IS_ERR(spdif_priv->clk_i958)) { dev_err(&pdev->dev, "Can't retrieve spdif clk_i958 clock\n"); ret = PTR_ERR(spdif_priv->clk_i958); goto err; } spdif_priv->clk_mclk = devm_clk_get(&pdev->dev, "mclk"); if (IS_ERR(spdif_priv->clk_mclk)) { dev_err(&pdev->dev, "Can't retrieve spdif clk_mclk clock\n"); ret = PTR_ERR(spdif_priv->clk_mclk); goto err; } spdif_priv->clk_spdif = devm_clk_get(&pdev->dev, "spdif"); if (IS_ERR(spdif_priv->clk_spdif)) { dev_err(&pdev->dev, "Can't retrieve spdif clock\n"); ret = PTR_ERR(spdif_priv->clk_spdif); goto err; } ret = clk_set_parent(spdif_priv->clk_i958, spdif_priv->clk_mpl1); if (ret) { pr_err("Can't set i958 clk parent: %d\n", ret); return ret; } ret = clk_set_parent(spdif_priv->clk_spdif, spdif_priv->clk_i958); if (ret) { pr_err("Can't set spdif clk parent: %d\n", ret); return ret; } ret = clk_prepare_enable(spdif_priv->clk_spdif); if (ret) { pr_err("Can't enable spdif clock: %d\n", ret); goto err; } aml_spdif_play(); ret = snd_soc_register_component(&pdev->dev, &aml_component, aml_spdif_dai, ARRAY_SIZE(aml_spdif_dai)); if (ret) { pr_err("Can't register spdif dai: %d\n", ret); goto err_clk_dis; } return 0; err_clk_dis: clk_disable_unprepare(spdif_priv->clk_spdif); err: return ret; }
static int rockchip_thermal_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct rockchip_thermal_data *thermal; const struct of_device_id *match; struct resource *res; int irq; int i; int error; match = of_match_node(of_rockchip_thermal_match, np); if (!match) return -ENXIO; irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no irq resource?\n"); return -EINVAL; } thermal = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_thermal_data), GFP_KERNEL); if (!thermal) return -ENOMEM; thermal->pdev = pdev; thermal->chip = (const struct rockchip_tsadc_chip *)match->data; if (!thermal->chip) return -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); thermal->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(thermal->regs)) return PTR_ERR(thermal->regs); thermal->reset = devm_reset_control_get(&pdev->dev, "tsadc-apb"); if (IS_ERR(thermal->reset)) { error = PTR_ERR(thermal->reset); dev_err(&pdev->dev, "failed to get tsadc reset: %d\n", error); return error; } thermal->clk = devm_clk_get(&pdev->dev, "tsadc"); if (IS_ERR(thermal->clk)) { error = PTR_ERR(thermal->clk); dev_err(&pdev->dev, "failed to get tsadc clock: %d\n", error); return error; } thermal->pclk = devm_clk_get(&pdev->dev, "apb_pclk"); if (IS_ERR(thermal->pclk)) { error = PTR_ERR(thermal->pclk); dev_err(&pdev->dev, "failed to get apb_pclk clock: %d\n", error); return error; } error = clk_prepare_enable(thermal->clk); if (error) { dev_err(&pdev->dev, "failed to enable converter clock: %d\n", error); return error; } error = clk_prepare_enable(thermal->pclk); if (error) { dev_err(&pdev->dev, "failed to enable pclk: %d\n", error); goto err_disable_clk; } rockchip_thermal_reset_controller(thermal->reset); error = rockchip_configure_from_dt(&pdev->dev, np, thermal); if (error) { dev_err(&pdev->dev, "failed to parse device tree data: %d\n", error); goto err_disable_pclk; } thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); error = rockchip_thermal_register_sensor(pdev, thermal, &thermal->sensors[0], SENSOR_CPU); if (error) { dev_err(&pdev->dev, "failed to register CPU thermal sensor: %d\n", error); goto err_disable_pclk; } error = rockchip_thermal_register_sensor(pdev, thermal, &thermal->sensors[1], SENSOR_GPU); if (error) { dev_err(&pdev->dev, "failed to register GPU thermal sensor: %d\n", error); goto err_unregister_cpu_sensor; } error = devm_request_threaded_irq(&pdev->dev, irq, NULL, &rockchip_thermal_alarm_irq_thread, IRQF_ONESHOT, "rockchip_thermal", thermal); if (error) { dev_err(&pdev->dev, "failed to request tsadc irq: %d\n", error); goto err_unregister_gpu_sensor; } thermal->chip->control(thermal->regs, true); for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); platform_set_drvdata(pdev, thermal); return 0; err_unregister_gpu_sensor: thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd); err_unregister_cpu_sensor: thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd); err_disable_pclk: clk_disable_unprepare(thermal->pclk); err_disable_clk: clk_disable_unprepare(thermal->clk); return error; }
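/*
 * Editor's sketch: rockchip_thermal_reset_controller() is called above but
 * its body is not shown in this section. A plausible implementation,
 * assuming the usual assert/delay/deassert pulse; the sleep window is a
 * guess, not taken from the driver.
 */
#include <linux/delay.h>
#include <linux/reset.h>

static void rockchip_thermal_reset_controller(struct reset_control *reset)
{
	reset_control_assert(reset);
	usleep_range(10, 20);		/* assumed settle time */
	reset_control_deassert(reset);
}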
static int host1x_probe(struct platform_device *pdev) { const struct of_device_id *id; struct host1x *host; struct resource *regs; int syncpt_irq; int err; id = of_match_device(host1x_of_match, &pdev->dev); if (!id) return -EINVAL; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_err(&pdev->dev, "failed to get registers\n"); return -ENXIO; } syncpt_irq = platform_get_irq(pdev, 0); if (syncpt_irq < 0) { dev_err(&pdev->dev, "failed to get IRQ\n"); return -ENXIO; } host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); if (!host) return -ENOMEM; mutex_init(&host->devices_lock); INIT_LIST_HEAD(&host->devices); INIT_LIST_HEAD(&host->list); host->dev = &pdev->dev; host->info = id->data; /* set common host1x device data */ platform_set_drvdata(pdev, host); host->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(host->regs)) return PTR_ERR(host->regs); dma_set_mask_and_coherent(host->dev, host->info->dma_mask); if (host->info->init) { err = host->info->init(host); if (err) return err; } host->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(host->clk)) { dev_err(&pdev->dev, "failed to get clock\n"); err = PTR_ERR(host->clk); return err; } host->rst = devm_reset_control_get(&pdev->dev, "host1x"); if (IS_ERR(host->rst)) { err = PTR_ERR(host->rst); dev_err(&pdev->dev, "failed to get reset: %d\n", err); return err; } if (iommu_present(&platform_bus_type)) { struct iommu_domain_geometry *geometry; unsigned long order; host->domain = iommu_domain_alloc(&platform_bus_type); if (!host->domain) return -ENOMEM; err = iommu_attach_device(host->domain, &pdev->dev); if (err) goto fail_free_domain; geometry = &host->domain->geometry; order = __ffs(host->domain->pgsize_bitmap); init_iova_domain(&host->iova, 1UL << order, geometry->aperture_start >> order, geometry->aperture_end >> order); host->iova_end = geometry->aperture_end; } err = host1x_channel_list_init(host); if (err) { dev_err(&pdev->dev, "failed to initialize channel list\n"); goto fail_detach_device; } err = clk_prepare_enable(host->clk); if (err < 0) { dev_err(&pdev->dev, "failed to enable clock\n"); goto fail_detach_device; } err = reset_control_deassert(host->rst); if (err < 0) { dev_err(&pdev->dev, "failed to deassert reset: %d\n", err); goto fail_unprepare_disable; } err = host1x_syncpt_init(host); if (err) { dev_err(&pdev->dev, "failed to initialize syncpts\n"); goto fail_reset_assert; } err = host1x_intr_init(host, syncpt_irq); if (err) { dev_err(&pdev->dev, "failed to initialize interrupts\n"); goto fail_deinit_syncpt; } host1x_debug_init(host); err = host1x_register(host); if (err < 0) goto fail_deinit_intr; return 0; fail_deinit_intr: host1x_intr_deinit(host); fail_deinit_syncpt: host1x_syncpt_deinit(host); fail_reset_assert: reset_control_assert(host->rst); fail_unprepare_disable: clk_disable_unprepare(host->clk); fail_detach_device: if (host->domain) { put_iova_domain(&host->iova); iommu_detach_device(host->domain, &pdev->dev); } fail_free_domain: if (host->domain) iommu_domain_free(host->domain); return err; }
static int stih407_usb2_picophy_probe(struct platform_device *pdev) { struct stih407_usb2_picophy *phy_dev; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct phy_provider *phy_provider; struct phy *phy; struct resource *res; phy_dev = devm_kzalloc(dev, sizeof(*phy_dev), GFP_KERNEL); if (!phy_dev) return -ENOMEM; phy_dev->dev = dev; dev_set_drvdata(dev, phy_dev); phy_dev->rstc = devm_reset_control_get(dev, "global"); if (IS_ERR(phy_dev->rstc)) { dev_err(dev, "failed to ctrl picoPHY global reset\n"); return PTR_ERR(phy_dev->rstc); } phy_dev->rstport = devm_reset_control_get(dev, "port"); if (IS_ERR(phy_dev->rstport)) { dev_err(dev, "failed to ctrl picoPHY port reset\n"); return PTR_ERR(phy_dev->rstport); } /* Reset port by default: only deassert it in phy init */ reset_control_assert(phy_dev->rstport); phy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); if (IS_ERR(phy_dev->regmap)) { dev_err(dev, "No syscfg phandle specified\n"); return PTR_ERR(phy_dev->regmap); } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); if (!res) { dev_err(dev, "No ctrl reg found\n"); return -ENXIO; } phy_dev->ctrl = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "param"); if (!res) { dev_err(dev, "No param reg found\n"); return -ENXIO; } phy_dev->param = res->start; phy = devm_phy_create(dev, NULL, &stih407_usb2_picophy_data); if (IS_ERR(phy)) { dev_err(dev, "failed to create picoPHY\n"); return PTR_ERR(phy); } phy_dev->phy = phy; phy_set_drvdata(phy, phy_dev); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); if (IS_ERR(phy_provider)) return PTR_ERR(phy_provider); dev_info(dev, "STiH407 USB Generic picoPHY driver probed!\n"); return 0; }
static int xhci_histb_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct xhci_hcd_histb *histb; const struct hc_driver *driver; struct usb_hcd *hcd; struct xhci_hcd *xhci; struct resource *res; int irq; int ret = -ENODEV; if (usb_disabled()) return -ENODEV; driver = &xhci_histb_hc_driver; histb = devm_kzalloc(dev, sizeof(*histb), GFP_KERNEL); if (!histb) return -ENOMEM; histb->dev = dev; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); histb->ctrl = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(histb->ctrl)) return PTR_ERR(histb->ctrl); ret = xhci_histb_clks_get(histb); if (ret) return ret; histb->soft_reset = devm_reset_control_get(dev, "soft"); if (IS_ERR(histb->soft_reset)) { dev_err(dev, "failed to get soft reset\n"); return PTR_ERR(histb->soft_reset); } pm_runtime_enable(dev); pm_runtime_get_sync(dev); device_enable_async_suspend(dev); /* Initialize dma_mask and coherent_dma_mask to 32-bits */ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) goto disable_pm; hcd = usb_create_hcd(driver, dev, dev_name(dev)); if (!hcd) { ret = -ENOMEM; goto disable_pm; } hcd->regs = histb->ctrl; hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); histb->hcd = hcd; dev_set_drvdata(hcd->self.controller, histb); ret = xhci_histb_host_enable(histb); if (ret) goto put_hcd; xhci = hcd_to_xhci(hcd); device_wakeup_enable(hcd->self.controller); xhci->main_hcd = hcd; xhci->shared_hcd = usb_create_shared_hcd(driver, dev, dev_name(dev), hcd); if (!xhci->shared_hcd) { ret = -ENOMEM; goto disable_host; } if (device_property_read_bool(dev, "usb2-lpm-disable")) xhci->quirks |= XHCI_HW_LPM_DISABLE; if (device_property_read_bool(dev, "usb3-lpm-capable")) xhci->quirks |= XHCI_LPM_SUPPORT; /* imod_interval is the interrupt moderation value in nanoseconds. */ xhci->imod_interval = 40000; device_property_read_u32(dev, "imod-interval-ns", &xhci->imod_interval); ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret) goto put_usb3_hcd; if (HCC_MAX_PSA(xhci->hcc_params) >= 4) xhci->shared_hcd->can_do_streams = 1; ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); if (ret) goto dealloc_usb2_hcd; device_enable_async_suspend(dev); pm_runtime_put_noidle(dev); /* * Prevent runtime pm from being on as default, users should enable * runtime pm using power/control in sysfs. */ pm_runtime_forbid(dev); return 0; dealloc_usb2_hcd: usb_remove_hcd(hcd); put_usb3_hcd: usb_put_hcd(xhci->shared_hcd); disable_host: xhci_histb_host_disable(histb); put_hcd: usb_put_hcd(hcd); disable_pm: pm_runtime_put_sync(dev); pm_runtime_disable(dev); return ret; }
static int nss_probe(struct platform_device *nss_dev) { struct nss_top_instance *nss_top = &nss_top_main; struct nss_ctx_instance *nss_ctx = NULL; struct nss_platform_data *npd = NULL; struct netdev_priv_instance *ndev_priv; #if (NSS_DT_SUPPORT == 1) struct reset_control *rstctl = NULL; #endif int i, err = 0; const struct firmware *nss_fw = NULL; int rc = -ENODEV; void __iomem *load_mem; #if (NSS_DT_SUPPORT == 1) struct device_node *np = NULL; if (nss_top_main.nss_hal_common_init_done == false) { /* * Perform clock init common to all NSS cores */ struct clk *nss_tcm_src = NULL; struct clk *nss_tcm_clk = NULL; /* * Attach debug interface to TLMM */ nss_write_32((uint32_t)nss_top_main.nss_fpb_base, NSS_REGS_FPB_CSR_CFG_OFFSET, 0x360); /* * NSS TCM CLOCK */ nss_tcm_src = clk_get(&nss_dev->dev, NSS_TCM_SRC_CLK); if (IS_ERR(nss_tcm_src)) { pr_err("nss-driver: cannot get clock: " NSS_TCM_SRC_CLK "\n"); return -EFAULT; } clk_set_rate(nss_tcm_src, NSSTCM_FREQ); clk_prepare(nss_tcm_src); clk_enable(nss_tcm_src); nss_tcm_clk = clk_get(&nss_dev->dev, NSS_TCM_CLK); if (IS_ERR(nss_tcm_clk)) { pr_err("nss-driver: cannot get clock: " NSS_TCM_CLK "\n"); return -EFAULT; } clk_prepare(nss_tcm_clk); clk_enable(nss_tcm_clk); nss_top_main.nss_hal_common_init_done = true; nss_info("nss_hal_common_reset Done.\n"); } if (nss_dev->dev.of_node) { /* * Device Tree based init */ np = of_node_get(nss_dev->dev.of_node); npd = nss_drv_of_get_pdata(np, nss_dev); of_node_put(np); if (!npd) { return -EFAULT; } nss_ctx = &nss_top->nss[npd->id]; nss_ctx->id = npd->id; nss_dev->id = nss_ctx->id; } else { /* * Platform Device based init */ npd = (struct nss_platform_data *) nss_dev->dev.platform_data; nss_ctx = &nss_top->nss[nss_dev->id]; nss_ctx->id = nss_dev->id; } #else npd = (struct nss_platform_data *) nss_dev->dev.platform_data; nss_ctx = &nss_top->nss[nss_dev->id]; nss_ctx->id = nss_dev->id; #endif nss_ctx->nss_top = nss_top; nss_info("%p: NSS_DEV_ID %s\n", nss_ctx, dev_name(&nss_dev->dev)); /* * F/W load from NSS Driver */ if (nss_ctx->id == 0) { rc = request_firmware(&nss_fw, NETAP0_IMAGE, &(nss_dev->dev)); } else if (nss_ctx->id == 1) { rc = request_firmware(&nss_fw, NETAP1_IMAGE, &(nss_dev->dev)); } else { nss_warning("%p: Invalid nss dev: %d\n", nss_ctx, nss_dev->id); } /* * Check if the file read is successful */ if (rc) { nss_warning("%p: request_firmware failed with err code: %d", nss_ctx, rc); err = rc; goto err_init_0; } if (nss_fw->size < MIN_IMG_SIZE) { nss_warning("%p: nss firmware is truncated, size:%zu", nss_ctx, nss_fw->size); } load_mem = ioremap_nocache(npd->load_addr, nss_fw->size); if (load_mem == NULL) { nss_warning("%p: ioremap_nocache failed: %x", nss_ctx, npd->load_addr); release_firmware(nss_fw); err = -ENOMEM; goto err_init_0; } printk("nss_driver - fw of size %zu bytes copied to load addr: %x, nss_id : %d\n", nss_fw->size, npd->load_addr, nss_dev->id); memcpy_toio(load_mem, nss_fw->data, nss_fw->size); release_firmware(nss_fw); iounmap(load_mem); /* * Both NSS cores controlled by same regulator, Hook only Once */ if (!nss_ctx->id) { nss_core0_clk = clk_get(&nss_dev->dev, "nss_core_clk"); if (IS_ERR(nss_core0_clk)) { err = PTR_ERR(nss_core0_clk); nss_info("%p: Clock %s get failed, err=%d\n", nss_ctx, dev_name(&nss_dev->dev), err); return err; } clk_set_rate(nss_core0_clk, NSS_FREQ_550); clk_prepare(nss_core0_clk); clk_enable(nss_core0_clk); #if (NSS_PM_SUPPORT == 1) /* * Check if turbo is supported */ if (npd->turbo_frequency) { /* * Turbo is supported */ printk("nss_driver - Turbo Support %d\n",
npd->turbo_frequency); nss_runtime_samples.freq_scale_sup_max = NSS_MAX_CPU_SCALES; nss_pm_set_turbo(); } else { printk("nss_driver - Turbo Not Supported %d\n", npd->turbo_frequency); nss_runtime_samples.freq_scale_sup_max = NSS_MAX_CPU_SCALES - 1; } #else printk("nss_driver - Turbo Not Supported\n"); #endif } /* * Get load address of NSS firmware */ nss_info("%p: Setting NSS%d Firmware load address to %x\n", nss_ctx, nss_ctx->id, npd->load_addr); nss_top->nss[nss_ctx->id].load = npd->load_addr; /* * Get virtual and physical memory addresses for nss logical/hardware address maps */ /* * Virtual address of CSM space */ nss_ctx->nmap = npd->nmap; nss_assert(nss_ctx->nmap); /* * Physical address of CSM space */ nss_ctx->nphys = npd->nphys; nss_assert(nss_ctx->nphys); /* * Virtual address of logical registers space */ nss_ctx->vmap = npd->vmap; nss_assert(nss_ctx->vmap); /* * Physical address of logical registers space */ nss_ctx->vphys = npd->vphys; nss_assert(nss_ctx->vphys); nss_info("%d:ctx=%p, vphys=%x, vmap=%x, nphys=%x, nmap=%x", nss_ctx->id, nss_ctx, nss_ctx->vphys, nss_ctx->vmap, nss_ctx->nphys, nss_ctx->nmap); /* * Register netdevice handlers */ nss_ctx->int_ctx[0].ndev = alloc_netdev(sizeof(struct netdev_priv_instance), "qca-nss-dev%d", nss_dummy_netdev_setup); if (nss_ctx->int_ctx[0].ndev == NULL) { nss_warning("%p: Could not allocate net_device #0", nss_ctx); err = -ENOMEM; goto err_init_0; } nss_ctx->int_ctx[0].ndev->netdev_ops = &nss_netdev_ops; nss_ctx->int_ctx[0].ndev->ethtool_ops = &nss_ethtool_ops; err = register_netdev(nss_ctx->int_ctx[0].ndev); if (err) { nss_warning("%p: Could not register net_device #0", nss_ctx); goto err_init_1; } /* * request for IRQs * * WARNING: CPU affinities should be set using OS supported methods */ nss_ctx->int_ctx[0].nss_ctx = nss_ctx; nss_ctx->int_ctx[0].shift_factor = 0; nss_ctx->int_ctx[0].irq = npd->irq[0]; err = request_irq(npd->irq[0], nss_handle_irq, IRQF_DISABLED, "nss", &nss_ctx->int_ctx[0]); if (err) { nss_warning("%d: IRQ0 request failed", nss_dev->id); goto err_init_2; } /* * Register NAPI for NSS core interrupt #0 */ ndev_priv = netdev_priv(nss_ctx->int_ctx[0].ndev); ndev_priv->int_ctx = &nss_ctx->int_ctx[0]; netif_napi_add(nss_ctx->int_ctx[0].ndev, &nss_ctx->int_ctx[0].napi, nss_core_handle_napi, 64); napi_enable(&nss_ctx->int_ctx[0].napi); nss_ctx->int_ctx[0].napi_active = true; /* * Check if second interrupt is supported on this nss core */ if (npd->num_irq > 1) { nss_info("%d: This NSS core supports two interrupts", nss_dev->id); /* * Register netdevice handlers */ nss_ctx->int_ctx[1].ndev = alloc_netdev(sizeof(struct netdev_priv_instance), "qca-nss-dev%d", nss_dummy_netdev_setup); if (nss_ctx->int_ctx[1].ndev == NULL) { nss_warning("%p: Could not allocate net_device #1", nss_ctx); err = -ENOMEM; goto err_init_3; } nss_ctx->int_ctx[1].ndev->netdev_ops = &nss_netdev_ops; nss_ctx->int_ctx[1].ndev->ethtool_ops = &nss_ethtool_ops; err = register_netdev(nss_ctx->int_ctx[1].ndev); if (err) { nss_warning("%p: Could not register net_device #1", nss_ctx); goto err_init_4; } nss_ctx->int_ctx[1].nss_ctx = nss_ctx; nss_ctx->int_ctx[1].shift_factor = 15; nss_ctx->int_ctx[1].irq = npd->irq[1]; err = request_irq(npd->irq[1], nss_handle_irq, IRQF_DISABLED, "nss", &nss_ctx->int_ctx[1]); if (err) { nss_warning("%d: IRQ1 request failed for nss", nss_dev->id); goto err_init_5; } /* * Register NAPI for NSS core interrupt #1 */ ndev_priv = netdev_priv(nss_ctx->int_ctx[1].ndev); ndev_priv->int_ctx = &nss_ctx->int_ctx[1];
netif_napi_add(nss_ctx->int_ctx[1].ndev, &nss_ctx->int_ctx[1].napi, nss_core_handle_napi, 64); napi_enable(&nss_ctx->int_ctx[1].napi); nss_ctx->int_ctx[1].napi_active = true; } spin_lock_bh(&(nss_top->lock)); /* * Check which functionalities are supported by this NSS core */ if (npd->shaping_enabled == NSS_FEATURE_ENABLED) { nss_top->shaping_handler_id = nss_dev->id; printk(KERN_INFO "%s: NSS Shaping is enabled, handler id: %u\n", __func__, nss_top->shaping_handler_id); } if (npd->ipv4_enabled == NSS_FEATURE_ENABLED) { nss_top->ipv4_handler_id = nss_dev->id; nss_ipv4_register_handler(); nss_pppoe_register_handler(); nss_eth_rx_register_handler(); nss_n2h_register_handler(); nss_virt_if_register_handler(); nss_lag_register_handler(); nss_dynamic_interface_register_handler(); nss_top->capwap_handler_id = nss_dev->id; nss_capwap_init(); for (i = 0; i < NSS_MAX_VIRTUAL_INTERFACES; i++) { nss_top->virt_if_handler_id[i] = nss_dev->id; } nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_802_3_REDIR] = nss_dev->id; } if (npd->ipv4_reasm_enabled == NSS_FEATURE_ENABLED) { nss_top->ipv4_reasm_handler_id = nss_dev->id; nss_ipv4_reasm_register_handler(); } if (npd->ipv6_enabled == NSS_FEATURE_ENABLED) { nss_top->ipv6_handler_id = nss_dev->id; nss_ipv6_register_handler(); } if (npd->crypto_enabled == NSS_FEATURE_ENABLED) { nss_top->crypto_handler_id = nss_dev->id; nss_crypto_register_handler(); } if (npd->ipsec_enabled == NSS_FEATURE_ENABLED) { nss_top->ipsec_handler_id = nss_dev->id; nss_ipsec_register_handler(); } if (npd->wlan_enabled == NSS_FEATURE_ENABLED) { nss_top->wlan_handler_id = nss_dev->id; } if (npd->tun6rd_enabled == NSS_FEATURE_ENABLED) { nss_top->tun6rd_handler_id = nss_dev->id; } if (npd->tunipip6_enabled == NSS_FEATURE_ENABLED) { nss_top->tunipip6_handler_id = nss_dev->id; nss_tunipip6_register_handler(); } if (npd->gre_redir_enabled == NSS_FEATURE_ENABLED) { nss_top->gre_redir_handler_id = nss_dev->id; nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR] = nss_dev->id; nss_gre_redir_register_handler(); nss_sjack_register_handler(); } /* * Mark the data plane enabled so that, once NSS core init is done, we register with nss-gmac */ for (i = 0; i < NSS_MAX_PHYSICAL_INTERFACES; i++) { if (npd->gmac_enabled[i] == NSS_FEATURE_ENABLED) { nss_data_plane_set_enabled(i); } } #if (NSS_PM_SUPPORT == 1) nss_freq_register_handler(); #endif nss_lso_rx_register_handler(); nss_top->frequency_handler_id = nss_dev->id; spin_unlock_bh(&(nss_top->lock)); /* * Initialize decongestion callbacks to NULL */ for (i = 0; i < NSS_MAX_CLIENTS; i++) { nss_ctx->queue_decongestion_callback[i] = NULL; nss_ctx->queue_decongestion_ctx[i] = NULL; } spin_lock_init(&(nss_ctx->decongest_cb_lock)); nss_ctx->magic = NSS_CTX_MAGIC; nss_info("%p: Resetting NSS core %d now", nss_ctx, nss_ctx->id); /* * Enable clocks and bring NSS core out of reset */ #if (NSS_DT_SUPPORT == 1) /* * Remove UBI32 reset clamp */ rstctl = devm_reset_control_get(&nss_dev->dev, "clkrst_clamp"); if (IS_ERR(rstctl)) { nss_info("%p: Deassert UBI32 reset clamp failed for core %d", nss_ctx, nss_ctx->id); err = -EFAULT; goto err_init_5; } reset_control_deassert(rstctl); mdelay(1); /* * Remove UBI32 core clamp */ rstctl = devm_reset_control_get(&nss_dev->dev, "clamp"); if (IS_ERR(rstctl)) { nss_info("%p: Deassert UBI32 core clamp failed for core %d", nss_ctx, nss_ctx->id); err = -EFAULT; goto err_init_5; } reset_control_deassert(rstctl); mdelay(1); /* * Remove UBI32 AHB reset */ rstctl =
devm_reset_control_get(&nss_dev->dev, "ahb"); if (IS_ERR(rstctl)) { nss_info("%p: Deassert AHB reset failed for core %d", nss_ctx, nss_ctx->id); err = -EFAULT; goto err_init_5; } reset_control_deassert(rstctl); mdelay(1); /* * Remove UBI32 AXI reset */ rstctl = devm_reset_control_get(&nss_dev->dev, "axi"); if (IS_ERR(rstctl)) { nss_info("%p: Deassert AXI reset failed for core %d", nss_ctx, nss_ctx->id); err = -EFAULT; goto err_init_5; } reset_control_deassert(rstctl); mdelay(1); nss_hal_core_reset(nss_ctx->nmap, nss_ctx->load); #else nss_hal_core_reset(nss_dev->id, nss_ctx->nmap, nss_ctx->load, nss_top->clk_src); #endif /* * Enable interrupts for NSS core */ nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[0].irq, nss_ctx->int_ctx[0].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS); if (npd->num_irq > 1) { nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[1].irq, nss_ctx->int_ctx[1].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS); } /* * Initialize max buffer size for NSS core */ nss_ctx->max_buf_size = NSS_NBUF_PAYLOAD_SIZE; nss_info("%p: All resources initialized and nss core%d has been brought out of reset", nss_ctx, nss_dev->id); /* * Success path: err is 0 here; err_init_0 is the shared exit that frees npd */ goto err_init_0; err_init_5: unregister_netdev(nss_ctx->int_ctx[1].ndev); err_init_4: free_netdev(nss_ctx->int_ctx[1].ndev); err_init_3: free_irq(npd->irq[0], &nss_ctx->int_ctx[0]); err_init_2: unregister_netdev(nss_ctx->int_ctx[0].ndev); err_init_1: free_netdev(nss_ctx->int_ctx[0].ndev); #if (NSS_DT_SUPPORT == 1) if (nss_dev->dev.of_node) { if (npd->nmap) { iounmap((void *)npd->nmap); } if (npd->vmap) { iounmap((void *)npd->vmap); } } #endif err_init_0: #if (NSS_DT_SUPPORT == 1) if (nss_dev->dev.of_node) { devm_kfree(&nss_dev->dev, npd); } #endif return err; }
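/*
 * Editor's sketch: the four near-identical deassert sequences above
 * (clkrst_clamp, clamp, ahb, axi) factored into one helper. The explicit
 * reset_control_put() calls were dropped in the edits above because the
 * controls are devm-managed; putting them by hand would release them a
 * second time at device detach. The helper name is hypothetical.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/reset.h>

static int foo_deassert_named(struct device *dev, const char *name)
{
	struct reset_control *rstctl;

	rstctl = devm_reset_control_get(dev, name);
	if (IS_ERR(rstctl))
		return PTR_ERR(rstctl);

	reset_control_deassert(rstctl);
	mdelay(1);		/* settle time, as in the probe above */
	return 0;		/* devm releases the control on detach */
}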