static int mxhci_hsic_probe(struct platform_device *pdev) { struct hc_driver *driver; struct device_node *node = pdev->dev.of_node; struct mxhci_hsic_hcd *mxhci; struct xhci_hcd *xhci; struct resource *res; struct usb_hcd *hcd; unsigned int reg; int ret; int irq; u32 tmp[3]; if (usb_disabled()) return -ENODEV; driver = &mxhci_hsic_hc_driver; pdev->dev.dma_mask = &dma_mask; /* dbg log event settings */ dbg_hsic.log_events = enable_dbg_log; dbg_hsic.log_payload = enable_payload_log; dbg_hsic.inep_log_mask = ep_addr_rxdbg_mask; dbg_hsic.outep_log_mask = ep_addr_rxdbg_mask; /* usb2.0 root hub */ driver->hcd_priv_size = sizeof(struct mxhci_hsic_hcd); hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) return -ENOMEM; irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -ENODEV; goto put_hcd; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENODEV; goto put_hcd; } hcd_to_bus(hcd)->skip_resume = true; hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = devm_request_and_ioremap(&pdev->dev, res); if (!hcd->regs) { dev_err(&pdev->dev, "error mapping memory\n"); ret = -EFAULT; goto put_hcd; } mxhci = hcd_to_hsic(hcd); mxhci->dev = &pdev->dev; mxhci->strobe = of_get_named_gpio(node, "hsic,strobe-gpio", 0); if (mxhci->strobe < 0) { ret = -EINVAL; goto put_hcd; } mxhci->data = of_get_named_gpio(node, "hsic,data-gpio", 0); if (mxhci->data < 0) { ret = -EINVAL; goto put_hcd; } ret = of_property_read_u32_array(node, "qcom,vdd-voltage-level", tmp, ARRAY_SIZE(tmp)); if (!ret) { mxhci->vdd_no_vol_level = tmp[0]; mxhci->vdd_low_vol_level = tmp[1]; mxhci->vdd_high_vol_level = tmp[2]; } else { dev_err(&pdev->dev, "failed to read qcom,vdd-voltage-level property\n"); ret = -EINVAL; goto put_hcd; } ret = mxhci_msm_config_gdsc(mxhci, 1); if (ret) { dev_err(&pdev->dev, "unable to configure hsic gdsc\n"); goto put_hcd; } ret = mxhci_hsic_init_clocks(mxhci, 1); if (ret) { dev_err(&pdev->dev, "unable to initialize 
clocks\n"); goto put_hcd; } ret = mxhci_hsic_init_vddcx(mxhci, 1); if (ret) { dev_err(&pdev->dev, "unable to initialize vddcx\n"); goto deinit_clocks; } mxhci_hsic_reset(mxhci); /* HSIC phy caliberation:set periodic caliberation interval ~2.048sec */ mxhci_hsic_ulpi_write(mxhci, 0xFF, MSM_HSIC_IO_CAL_PER); /* Enable periodic IO calibration in HSIC_CFG register */ mxhci_hsic_ulpi_write(mxhci, 0xA8, MSM_HSIC_CFG); /* Configure Strobe and Data GPIOs to enable HSIC */ ret = mxhci_hsic_config_gpios(mxhci); if (ret) { dev_err(mxhci->dev, " gpio configuarion failed\n"); goto deinit_vddcx; } /* enable STROBE_PAD_CTL */ reg = readl_relaxed(TLMM_GPIO_HSIC_STROBE_PAD_CTL); writel_relaxed(reg | 0x2000000, TLMM_GPIO_HSIC_STROBE_PAD_CTL); /* enable DATA_PAD_CTL */ reg = readl_relaxed(TLMM_GPIO_HSIC_DATA_PAD_CTL); writel_relaxed(reg | 0x2000000, TLMM_GPIO_HSIC_DATA_PAD_CTL); mb(); /* Enable LPM in Sleep mode and suspend mode */ reg = readl_relaxed(MSM_HSIC_CTRL_REG); reg |= CTRLREG_PLL_CTRL_SLEEP | CTRLREG_PLL_CTRL_SUSP; writel_relaxed(reg, MSM_HSIC_CTRL_REG); if (of_property_read_bool(node, "qti,disable-hw-clk-gating")) { reg = readl_relaxed(MSM_HSIC_GCTL); writel_relaxed((reg | GCTL_DSBLCLKGTNG), MSM_HSIC_GCTL); } /* enable pwr event irq for LPM_IN_L2_IRQ */ writel_relaxed(LPM_IN_L2_IRQ_MASK, MSM_HSIC_PWR_EVNT_IRQ_MASK); mxhci->wakeup_irq = platform_get_irq_byname(pdev, "wakeup_irq"); if (mxhci->wakeup_irq < 0) { mxhci->wakeup_irq = 0; dev_err(&pdev->dev, "failed to init wakeup_irq\n"); } else { /* enable wakeup irq only when entering lpm */ irq_set_status_flags(mxhci->wakeup_irq, IRQ_NOAUTOEN); ret = devm_request_irq(&pdev->dev, mxhci->wakeup_irq, mxhci_hsic_wakeup_irq, 0, "mxhci_hsic_wakeup", mxhci); if (ret) { dev_err(&pdev->dev, "request irq failed (wakeup irq)\n"); goto deinit_vddcx; } } ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret) goto deinit_vddcx; hcd = dev_get_drvdata(&pdev->dev); xhci = hcd_to_xhci(hcd); /* USB 3.0 roothub */ /* no need for another instance of 
mxhci */ driver->hcd_priv_size = sizeof(struct xhci_hcd *); xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev, dev_name(&pdev->dev), hcd); if (!xhci->shared_hcd) { ret = -ENOMEM; goto remove_usb2_hcd; } hcd_to_bus(xhci->shared_hcd)->skip_resume = true; /* * Set the xHCI pointer before xhci_plat_setup() (aka hcd_driver.reset) * is called by usb_add_hcd(). */ *((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci; ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); if (ret) goto put_usb3_hcd; spin_lock_init(&mxhci->wakeup_lock); mxhci->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq"); if (mxhci->pwr_event_irq < 0) { dev_err(&pdev->dev, "platform_get_irq for pwr_event_irq failed\n"); goto remove_usb3_hcd; } ret = devm_request_irq(&pdev->dev, mxhci->pwr_event_irq, mxhci_hsic_pwr_event_irq, 0, "mxhci_hsic_pwr_evt", mxhci); if (ret) { dev_err(&pdev->dev, "request irq failed (pwr event irq)\n"); goto remove_usb3_hcd; } init_completion(&mxhci->phy_in_lpm); mxhci->wq = create_singlethread_workqueue("mxhci_wq"); if (!mxhci->wq) { dev_err(&pdev->dev, "unable to create workqueue\n"); ret = -ENOMEM; goto remove_usb3_hcd; } INIT_WORK(&mxhci->bus_vote_w, mxhci_hsic_bus_vote_w); mxhci->bus_scale_table = msm_bus_cl_get_pdata(pdev); if (!mxhci->bus_scale_table) { dev_dbg(&pdev->dev, "bus scaling is disabled\n"); } else { mxhci->bus_perf_client = msm_bus_scale_register_client(mxhci->bus_scale_table); /* Configure BUS performance parameters for MAX bandwidth */ if (mxhci->bus_perf_client) { mxhci->bus_vote = true; queue_work(mxhci->wq, &mxhci->bus_vote_w); } else { dev_err(&pdev->dev, "%s: bus scaling client reg err\n", __func__); ret = -ENODEV; goto delete_wq; } } ret = device_create_file(&pdev->dev, &dev_attr_config_imod); if (ret) dev_dbg(&pdev->dev, "%s: unable to create imod sysfs entry\n", __func__); /* Enable HSIC PHY */ mxhci_hsic_ulpi_write(mxhci, 0x01, MSM_HSIC_CFG_SET); device_init_wakeup(&pdev->dev, 1); wakeup_source_init(&mxhci->ws, 
dev_name(&pdev->dev)); pm_stay_awake(mxhci->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); return 0; delete_wq: destroy_workqueue(mxhci->wq); remove_usb3_hcd: usb_remove_hcd(xhci->shared_hcd); put_usb3_hcd: usb_put_hcd(xhci->shared_hcd); remove_usb2_hcd: usb_remove_hcd(hcd); deinit_vddcx: mxhci_hsic_init_vddcx(mxhci, 0); deinit_clocks: mxhci_hsic_init_clocks(mxhci, 0); put_hcd: usb_put_hcd(hcd); return ret; }
static int cs75xx_gpio_probe(struct platform_device *pdev) { int i, j; char tmp_str[16]; struct resource *res_mem; gpio_dbgmsg("Function: %s, pdev->name = %s\n", __func__, pdev->name); memset(cs75xx_gpio_base, 0, sizeof(cs75xx_gpio_base)); /* get the module base address and irq number */ sprintf(tmp_str, "global"); res_mem = platform_get_resource_byname(pdev, IORESOURCE_IO, tmp_str); if (!res_mem) { gpio_dbgmsg("Func: %s - can't get resource %s\n", __func__, tmp_str); goto fail; } cs75xx_global_base = ioremap(res_mem->start, res_mem->end - res_mem->start + 1); if (!cs75xx_global_base) { gpio_dbgmsg("Func: %s - unable to remap %s %d memory \n", __func__, tmp_str, res_mem->end - res_mem->start + 1); goto fail; } gpio_dbgmsg("\tcs75xx_global_base = %p\n", cs75xx_global_base); for (i = 0; i < GPIO_BANK_NUM; i++) { sprintf(tmp_str, "gpio%d", i); res_mem = platform_get_resource_byname(pdev, IORESOURCE_IO, tmp_str); if (!res_mem) { gpio_dbgmsg("Func: %s - can't get resource %s\n", __func__, tmp_str); goto fail; } cs75xx_gpio_base[i] = ioremap(res_mem->start, res_mem->end - res_mem->start + 1); if (!cs75xx_gpio_base[i]) { gpio_dbgmsg("Func: %s - unable to remap %s %d memory \n", __func__, tmp_str, res_mem->end - res_mem->start + 1); goto fail; } gpio_dbgmsg("\tcs75xx_gpio_base[%d] = %p\n", i, cs75xx_gpio_base[i]); } for (i = 0; i < GPIO_BANK_NUM; i++) { sprintf(tmp_str, "irq_gpio%d", i); cs75xx_irq_gpio[i] = platform_get_irq_byname(pdev, tmp_str); if (cs75xx_irq_gpio[i] == -ENXIO) { gpio_dbgmsg("Func: %s - can't get resource %s\n", __func__, tmp_str); goto fail; } gpio_dbgmsg("\tcs75xx_irq_gpio[%d] = %08x\n", i, cs75xx_irq_gpio[i]); } /* disable irq and register to gpiolib */ for (i = 0; i < GPIO_BANK_NUM; i++) { /* disable, unmask and clear all interrupts */ __raw_writel(0x0, cs75xx_gpio_base[i] + CS75XX_GPIO_IE); for (j = GPIO_IRQ_BASE + i * GPIO_BANK_SIZE; j < GPIO_IRQ_BASE + (i + 1) * GPIO_BANK_SIZE; j++) { irq_set_chip(j, &cs75xx_gpio_irq_chip); irq_set_handler(j, 
handle_edge_irq); set_irq_flags(j, IRQF_VALID); } irq_set_chained_handler(cs75xx_irq_gpio[i], cs75xx_gpio_irq_handler); irq_set_handler_data(cs75xx_irq_gpio[i], (void *)i); } BUG_ON(gpiochip_add(&cs75xx_gpio_chip)); return 0; fail: for (i = 0; i < GPIO_BANK_NUM; i++) if (cs75xx_gpio_base[i]) iounmap(cs75xx_gpio_base[i]); return -ENODEV; }
static int da9055_onkey_probe(struct platform_device *pdev) { struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent); struct da9055_onkey *onkey; struct input_dev *input_dev; int irq, err; irq = platform_get_irq_byname(pdev, "ONKEY"); if (irq < 0) { dev_err(&pdev->dev, "Failed to get an IRQ for input device, %d\n", irq); return -EINVAL; } onkey = devm_kzalloc(&pdev->dev, sizeof(*onkey), GFP_KERNEL); if (!onkey) { dev_err(&pdev->dev, "Failed to allocate memory\n"); return -ENOMEM; } input_dev = input_allocate_device(); if (!input_dev) { dev_err(&pdev->dev, "Failed to allocate memory\n"); return -ENOMEM; } onkey->input = input_dev; onkey->da9055 = da9055; input_dev->name = "da9055-onkey"; input_dev->phys = "da9055-onkey/input0"; input_dev->dev.parent = &pdev->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY); __set_bit(KEY_POWER, input_dev->keybit); INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work); irq = regmap_irq_get_virq(da9055->irq_data, irq); err = request_threaded_irq(irq, NULL, da9055_onkey_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "ONKEY", onkey); if (err < 0) { dev_err(&pdev->dev, "Failed to register ONKEY IRQ %d, error = %d\n", irq, err); goto err_free_input; } err = input_register_device(input_dev); if (err) { dev_err(&pdev->dev, "Unable to register input device, %d\n", err); goto err_free_irq; } platform_set_drvdata(pdev, onkey); return 0; err_free_irq: free_irq(irq, onkey); cancel_delayed_work_sync(&onkey->work); err_free_input: input_free_device(input_dev); return err; }
/*
 * hisi_pmic_otmp_mntn_initial() - set up PMIC over-temperature monitoring.
 *
 * Reads the OTMP threshold / hard-reset-powerdown configuration from DT,
 * maps the sysctrl block, programs the threshold register, and installs a
 * workqueue plus IRQ handler for the "otmp" interrupt.
 * Returns 0 on success or a negative errno.
 */
static int hisi_pmic_otmp_mntn_initial(struct platform_device *pdev,
				       PMIC_MNTN_DESC *pmic_mntn)
{
	struct device_node *root = NULL;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	unsigned char reg_value = 0;
	s32 ret = 0;

	root = of_find_compatible_node(np, NULL, "hisilicon,pmic-mntn-otmp");
	if (!root) {
		dev_err(dev, "[%s]no hisilicon,pmic-mntn-otmp root node\n",
			__func__);
		return -ENODEV;
	}

	/* OR the results together: any single DT read failure aborts probe */
	ret |= of_property_read_u32_array(root, "hisilicon,otmp-threshold-val",
			(u32 *)&pmic_mntn->otmp_thshd_val, 0x1);
	ret |= of_property_read_u32_array(root, "hisilicon,otmp-threshold-reg",
			(u32 *)&pmic_mntn->otmp_thshd_reg, 0x3);
	if (ret) {
		dev_err(dev, "[%s]get pmic otmp attribute failed.\n",
			__func__);
		return -ENODEV;
	}

	ret |= of_property_read_u32_array(root,
			"hisilicon,otmp-hreset-pwrdown-flag",
			(u32 *)&pmic_mntn->otmp_hreset_pwrdown_flag, 0x1);
	ret |= of_property_read_u32_array(root,
			"hisilicon,otmp-hreset-pwrdown-val",
			(u32 *)&pmic_mntn->otmp_hreset_pwrdown_val, 0x1);
	ret |= of_property_read_u32_array(root,
			"hisilicon,otmp-hreset-pwrdown-reg",
			(u32 *)&pmic_mntn->otmp_hreset_pwrdown_reg, 0x3);
	if (ret) {
		dev_err(dev, "[%s]get pmic otmp attribute failed.\n",
			__func__);
		return -ENODEV;
	}

	root = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
	if (!root) {
		/* Fix: dev_err() was called without its struct device arg */
		dev_err(dev, "%s: hisilicon,sysctrl No compatible node found\n",
			__func__);
		return -ENODEV;
	}

	g_sysctrl_base = of_iomap(root, 0);
	if (!g_sysctrl_base) {
		/* Fix: dev_err() was called without its struct device arg */
		dev_err(dev, "%s: hisilicon,sysctrl_base is NULL\n", __func__);
		return -ENODEV;
	}

	/*Set the otmp threshold*/
	reg_value = hisi_pmic_reg_read(pmic_mntn->otmp_thshd_reg.addr);
	SET_REG_BIT(reg_value, pmic_mntn->otmp_thshd_reg.shift,
		    pmic_mntn->otmp_thshd_reg.mask,
		    pmic_mntn->otmp_thshd_val);
	hisi_pmic_reg_write(pmic_mntn->otmp_thshd_reg.addr, reg_value);

	pmic_mntn->otmp_irq = platform_get_irq_byname(pdev, "otmp");
	/* Fix: copy-paste bug checked ocp_irq after fetching otmp_irq */
	if (pmic_mntn->otmp_irq < 0) {
		dev_err(dev, "[%s]platform_get_irq_byname otmp_irq failed.\n",
			__func__);
		return -ENODEV;
	}

	pmic_mntn->otmp_wq = create_singlethread_workqueue("pmic-otmp-wq");
	/* Fix: the workqueue allocation result was never checked */
	if (!pmic_mntn->otmp_wq) {
		dev_err(dev, "[%s]create_singlethread_workqueue failed.\n",
			__func__);
		return -ENOMEM;
	}
	INIT_WORK(&pmic_mntn->otmp_wk, (void *)hisi_pmic_otmp_wk_handler);

	ret = devm_request_irq(dev, pmic_mntn->otmp_irq,
			       hisi_pmic_otmp_irq_handler,
			       IRQF_DISABLED|IRQF_NO_SUSPEND,
			       "pmic-otmp-irq", (void *)pmic_mntn);
	if (ret) {
		dev_err(dev, "[%s] request_irq otmp_irq err\n", __func__);
		return -ENODEV;
	}

	return 0;
}
int sprd6500_init_modemctl_device(struct modem_ctl *mc, struct modem_data *pdata) { int ret = 0; struct platform_device *pdev; mc->gpio_cp_on = pdata->gpio_cp_on; mc->gpio_reset_req_n = pdata->gpio_reset_req_n; mc->gpio_cp_reset = pdata->gpio_cp_reset; mc->gpio_pda_active = pdata->gpio_pda_active; mc->gpio_phone_active = pdata->gpio_phone_active; mc->gpio_cp_dump_int = pdata->gpio_cp_dump_int; mc->gpio_flm_uart_sel = pdata->gpio_flm_uart_sel; mc->gpio_cp_warm_reset = pdata->gpio_cp_warm_reset; mc->gpio_sim_detect = pdata->gpio_sim_detect; #if defined(CONFIG_LINK_DEVICE_PLD) mc->gpio_fpga1_cs_n = pdata->gpio_fpga1_cs_n; #endif gpio_set_value(mc->gpio_cp_reset, 0); gpio_set_value(mc->gpio_cp_on, 0); pdev = to_platform_device(mc->dev); mc->irq_phone_active = platform_get_irq_byname(pdev, "cp_active_irq"); pr_info("[MODEM_IF:ESC] <%s> PHONE_ACTIVE IRQ# = %d\n", __func__, mc->irq_phone_active); sprd6500_get_ops(mc); if (mc->irq_phone_active) { ret = request_irq(mc->irq_phone_active, phone_active_irq_handler, IRQF_TRIGGER_HIGH, "esc_active", mc); if (ret) { pr_err("[MODEM_IF:ESC] <%s> failed to request_irq IRQ# %d (err=%d)\n", __func__, mc->irq_phone_active, ret); return ret; } ret = enable_irq_wake(mc->irq_phone_active); if (ret) { pr_err("[MODEM_IF:ESC] %s: failed to enable_irq_wake IRQ# %d (err=%d)\n", __func__, mc->irq_phone_active, ret); free_irq(mc->irq_phone_active, mc); return ret; } } #if defined(CONFIG_SIM_DETECT) mc->irq_sim_detect = platform_get_irq_byname(pdev, "sim_irq"); pr_info("[MODEM_IF:ESC] <%s> SIM_DECTCT IRQ# = %d\n", __func__, mc->irq_sim_detect); if (mc->irq_sim_detect) { ret = request_irq(mc->irq_sim_detect, sim_detect_irq_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "esc_sim_detect", mc); if (ret) { mif_err("failed to request_irq: %d\n", ret); mc->sim_state.online = false; mc->sim_state.changed = false; return ret; } ret = enable_irq_wake(mc->irq_sim_detect); if (ret) { mif_err("failed to enable_irq_wake: %d\n", ret); 
free_irq(mc->irq_sim_detect, mc); mc->sim_state.online = false; mc->sim_state.changed = false; return ret; } /* initialize sim_state => insert: gpio=0, remove: gpio=1 */ mc->sim_state.online = !gpio_get_value(mc->gpio_sim_detect); } #endif return ret; }
/*
 * McBSP1 and McBSP3 are directly mapped on 1610 and 1510.
 * 730 has only 2 McBSP, and both of them are MPU peripherals.
 */

/*
 * omap_mcbsp_probe() - probe one OMAP McBSP port.
 *
 * Maps the MPU-side register window, resolves the DMA-side base (falling
 * back to the MPU base when no "dma" resource exists), fetches tx/rx IRQs
 * and DMA request lines, grabs the functional clock and registers the
 * instance in the global mcbsp_ptr[] table.  Returns 0 or a negative errno.
 */
static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
{
	struct omap_mcbsp_platform_data *pdata = pdev->dev.platform_data;
	struct omap_mcbsp *mcbsp;
	/* platform device ids are 1-based; internal table index is 0-based */
	int id = pdev->id - 1;
	struct resource *res;
	int ret = 0;

	if (!pdata) {
		dev_err(&pdev->dev, "McBSP device initialized without"
				"platform data\n");
		ret = -EINVAL;
		goto exit;
	}

	dev_dbg(&pdev->dev, "Initializing OMAP McBSP (%d).\n", pdev->id);

	if (id >= omap_mcbsp_count) {
		dev_err(&pdev->dev, "Invalid McBSP device id (%d)\n", id);
		ret = -EINVAL;
		goto exit;
	}

	mcbsp = kzalloc(sizeof(struct omap_mcbsp), GFP_KERNEL);
	if (!mcbsp) {
		ret = -ENOMEM;
		goto exit;
	}

	spin_lock_init(&mcbsp->lock);
	mcbsp->id = id + 1;
	mcbsp->free = true;

	/* prefer the named "mpu" window; older boards have only index 0 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
	if (!res) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			dev_err(&pdev->dev, "%s:mcbsp%d has invalid memory"
					"resource\n", __func__, pdev->id);
			ret = -ENOMEM;
			goto exit;
		}
	}
	mcbsp->phys_base = res->start;
	/* NOTE(review): writes a file-scope global from per-device probe —
	 * presumably all ports share one window size; confirm. */
	omap_mcbsp_cache_size = resource_size(res);
	mcbsp->io_base = ioremap(res->start, resource_size(res));
	if (!mcbsp->io_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	/* DMA engine may see the port at a different address than the MPU */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
	if (!res)
		mcbsp->phys_dma_base = mcbsp->phys_base;
	else
		mcbsp->phys_dma_base = res->start;

	mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
	mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");

	/* From OMAP4 there will be a single irq line */
	if (mcbsp->tx_irq == -ENXIO)
		mcbsp->tx_irq = platform_get_irq(pdev, 0);
	/* NOTE(review): rx_irq gets no such fallback and may stay -ENXIO on
	 * OMAP4 — verify downstream users tolerate that. */

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (!res) {
		dev_err(&pdev->dev, "%s:mcbsp%d has invalid rx DMA channel\n",
					__func__, pdev->id);
		ret = -ENODEV;
		goto err_res;
	}
	mcbsp->dma_rx_sync = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (!res) {
		dev_err(&pdev->dev, "%s:mcbsp%d has invalid tx DMA channel\n",
					__func__, pdev->id);
		ret = -ENODEV;
		goto err_res;
	}
	mcbsp->dma_tx_sync = res->start;

	mcbsp->fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(mcbsp->fclk)) {
		ret = PTR_ERR(mcbsp->fclk);
		dev_err(&pdev->dev, "unable to get fck: %d\n", ret);
		goto err_res;
	}

	mcbsp->pdata = pdata;
	mcbsp->dev = &pdev->dev;
	mcbsp_ptr[id] = mcbsp;
	mcbsp->mcbsp_config_type = pdata->mcbsp_config_type;
	platform_set_drvdata(pdev, mcbsp);
	pm_runtime_enable(mcbsp->dev);

	/* Initialize mcbsp properties for OMAP34XX if needed / applicable */
	omap34xx_device_init(mcbsp);

	return 0;

err_res:
	iounmap(mcbsp->io_base);
err_ioremap:
	kfree(mcbsp);
exit:
	return ret;
}
/*
 * msm_iommu_probe() - probe an MSM IOMMU instance.
 *
 * Maps the register windows, acquires the optional GDSC regulators and the
 * iface/core (plus optional alternate) clocks, parses the DT description,
 * optionally registers with the performance-monitor framework, requests the
 * global fault IRQs and finally populates the context-bank child devices.
 * Returns 0 on success or a negative errno; the fail path unprepares any
 * prepared clocks (clk_unprepare() of a never-prepared/NULL clk relies on
 * the clk framework tolerating that — see NOTE below).
 */
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct iommu_pmon *pmon_info;
	struct msm_iommu_drvdata *drvdata;
	struct resource *r;
	int ret, needs_alt_core_clk, needs_alt_iface_clk;
	int global_cfg_irq, global_client_irq;
	u32 temp;

	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iommu_base");
	if (!r)
		return -EINVAL;

	drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!drvdata->base)
		return -ENOMEM;

	drvdata->phys_base = r->start;

	/* optional debug window below the IOMMU; failure is only warned */
	if (IS_ENABLED(CONFIG_MSM_IOMMU_VBIF_CHECK)) {
		drvdata->vbif_base =
			ioremap(drvdata->phys_base - (phys_addr_t) 0x4000,
				0x1000);
		WARN_ON_ONCE(!drvdata->vbif_base);
	}

	/* "smmu_local_base" is optional hardware; map it only if present */
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					 "smmu_local_base");
	if (r) {
		drvdata->smmu_local_base =
			devm_ioremap(&pdev->dev, r->start, resource_size(r));
		if (!drvdata->smmu_local_base)
			return -ENOMEM;
	}

	drvdata->glb_base = drvdata->base;

	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-mmu-500"))
		drvdata->model = MMU_500;

	/* regulators are optional: absence is only a debug message */
	if (of_get_property(pdev->dev.of_node, "vdd-supply", NULL)) {
		drvdata->gdsc = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(drvdata->gdsc))
			return PTR_ERR(drvdata->gdsc);

		/* alternate GDSC is best-effort; NULL if unavailable */
		drvdata->alt_gdsc = devm_regulator_get(&pdev->dev,
						       "qcom,alt-vdd");
		if (IS_ERR(drvdata->alt_gdsc))
			drvdata->alt_gdsc = NULL;
	} else {
		pr_debug("Warning: No regulator specified for IOMMU\n");
	}

	drvdata->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(drvdata->pclk)) {
		ret = PTR_ERR(drvdata->pclk);
		/* cleared so the fail path does not unprepare an ERR_PTR */
		drvdata->pclk = NULL;
		goto fail;
	}

	ret = clk_prepare(drvdata->pclk);
	if (ret)
		return ret;

	drvdata->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(drvdata->clk)) {
		ret = PTR_ERR(drvdata->clk);
		drvdata->clk = NULL;
		goto fail;
	}

	ret = clk_prepare(drvdata->clk);
	if (ret)
		goto fail;

	needs_alt_core_clk = of_property_read_bool(pdev->dev.of_node,
						   "qcom,needs-alt-core-clk");
	if (needs_alt_core_clk) {
		drvdata->aclk = devm_clk_get(&pdev->dev, "alt_core_clk");
		if (IS_ERR(drvdata->aclk)) {
			ret = PTR_ERR(drvdata->aclk);
			drvdata->aclk = NULL;
			goto fail;
		}
		ret = clk_prepare(drvdata->aclk);
		if (ret)
			goto fail;
	}

	needs_alt_iface_clk = of_property_read_bool(pdev->dev.of_node,
						    "qcom,needs-alt-iface-clk");
	if (needs_alt_iface_clk) {
		drvdata->aiclk = devm_clk_get(&pdev->dev, "alt_iface_clk");
		if (IS_ERR(drvdata->aiclk)) {
			ret = PTR_ERR(drvdata->aiclk);
			drvdata->aiclk = NULL;
			goto fail;
		}
		ret = clk_prepare(drvdata->aiclk);
		if (ret)
			goto fail;
	}

	/* context banks default to base + 0x8000 unless DT overrides it */
	if (!of_property_read_u32(pdev->dev.of_node, "qcom,cb-base-offset",
				  &temp))
		drvdata->cb_base = drvdata->base + temp;
	else
		drvdata->cb_base = drvdata->base + 0x8000;

	/* give unconfigured clocks a minimal nominal rate */
	if (clk_get_rate(drvdata->clk) == 0) {
		ret = clk_round_rate(drvdata->clk, 1000);
		clk_set_rate(drvdata->clk, ret);
	}

	if (drvdata->aclk && clk_get_rate(drvdata->aclk) == 0) {
		ret = clk_round_rate(drvdata->aclk, 1000);
		clk_set_rate(drvdata->aclk, ret);
	}

	if (drvdata->aiclk && clk_get_rate(drvdata->aiclk) == 0) {
		ret = clk_round_rate(drvdata->aiclk, 1000);
		clk_set_rate(drvdata->aiclk, ret);
	}

	ret = msm_iommu_parse_dt(pdev, drvdata);
	/* NOTE(review): returning here skips the fail: label, leaving the
	 * already-prepared clocks prepared — looks like it should be
	 * "goto fail"; confirm before changing. */
	if (ret)
		return ret;

	dev_info(&pdev->dev,
		 "device %s (model: %d) mapped at %p, with %d ctx banks\n",
		 drvdata->name, drvdata->model, drvdata->base, drvdata->ncb);

	platform_set_drvdata(pdev, drvdata);

	/* performance-monitor registration is best-effort */
	pmon_info = msm_iommu_pm_alloc(&pdev->dev);
	if (pmon_info != NULL) {
		ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
		if (ret) {
			msm_iommu_pm_free(&pdev->dev);
			pr_info("%s: pmon not available.\n", drvdata->name);
		} else {
			pmon_info->iommu.base = drvdata->base;
			pmon_info->iommu.ops = msm_get_iommu_access_ops();
			pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
			pmon_info->iommu.iommu_name = drvdata->name;
			ret = msm_iommu_pm_iommu_register(pmon_info);
			if (ret) {
				pr_err("%s iommu register fail\n",
				       drvdata->name);
				msm_iommu_pm_free(&pdev->dev);
			} else {
				pr_debug("%s iommu registered for pmon\n",
					 pmon_info->iommu.iommu_name);
			}
		}
	}

	/* global fault IRQs are optional; request failures are only logged */
	global_cfg_irq = platform_get_irq_byname(pdev, "global_cfg_NS_irq");
	if (global_cfg_irq > 0) {
		ret = devm_request_threaded_irq(&pdev->dev, global_cfg_irq,
				NULL,
				msm_iommu_global_fault_handler,
				IRQF_ONESHOT | IRQF_SHARED |
				IRQF_TRIGGER_RISING,
				"msm_iommu_global_cfg_irq", pdev);
		if (ret < 0)
			pr_err("Request Global CFG IRQ %d failed with ret=%d\n",
			       global_cfg_irq, ret);
	}

	global_client_irq =
		platform_get_irq_byname(pdev, "global_client_NS_irq");
	if (global_client_irq > 0) {
		ret = devm_request_threaded_irq(&pdev->dev, global_client_irq,
				NULL,
				msm_iommu_global_fault_handler,
				IRQF_ONESHOT | IRQF_SHARED |
				IRQF_TRIGGER_RISING,
				"msm_iommu_global_client_irq", pdev);
		if (ret < 0)
			pr_err("Request Global Client IRQ %d failed with ret=%d\n",
			       global_client_irq, ret);
	}

	ret = of_platform_populate(pdev->dev.of_node, msm_iommu_ctx_match_table,
				   NULL, &pdev->dev);
fail:
	/* shared by error paths and a failed of_platform_populate() above */
	if (ret) {
		clk_unprepare(drvdata->clk);
		clk_unprepare(drvdata->pclk);
		clk_unprepare(drvdata->aclk);
		clk_unprepare(drvdata->aiclk);
		pr_err("Failed to create iommu context device\n");
	}
	return ret;
}
/**
 * dwc3_otg_sm_work - workqueue function.
 *
 * @w: Pointer to the dwc3 otg workqueue
 *
 * NOTE: After any change in phy->state,
 * we must reschdule the state machine.
 */
static void dwc3_otg_sm_work(struct work_struct *w)
{
	struct dwc3_otg *dotg = container_of(w, struct dwc3_otg, sm_work.work);
	struct usb_phy *phy = dotg->otg.phy;
	struct dwc3_charger *charger = dotg->charger;
	bool work = 0;
	int ret = 0;
	unsigned long delay = 0;

	pm_runtime_resume(phy->dev);
	dev_dbg(phy->dev, "%s state\n", otg_state_string(phy->state));

	/* Check OTG state */
	switch (phy->state) {
	case OTG_STATE_UNDEFINED:
		dwc3_otg_init_sm(dotg);
		if (!dotg->psy) {
			dotg->psy = power_supply_get_by_name("usb");

			if (!dotg->psy)
				dev_err(phy->dev,
					"couldn't get usb power supply\n");
		}

		/* Switch to A or B-Device according to ID / BSV */
		if (!test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "!id\n");
			phy->state = OTG_STATE_A_IDLE;
			work = 1;
		} else if (test_bit(B_SESS_VLD, &dotg->inputs)) {
			dev_dbg(phy->dev, "b_sess_vld\n");
			phy->state = OTG_STATE_B_IDLE;
			work = 1;
		} else {
			phy->state = OTG_STATE_B_IDLE;
			dev_dbg(phy->dev, "No device, trying to suspend\n");
			pm_runtime_put_sync(phy->dev);
		}
		break;
	case OTG_STATE_B_IDLE:
		if (!test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "!id\n");
			phy->state = OTG_STATE_A_IDLE;
			work = 1;
			dotg->charger_retry_count = 0;
			if (charger) {
				if (charger->chg_type == DWC3_INVALID_CHARGER)
					charger->start_detection(dotg->charger,
								 false);
				else
					charger->chg_type =
						DWC3_INVALID_CHARGER;
			}
		} else if (test_bit(B_SESS_VLD, &dotg->inputs)) {
			dev_dbg(phy->dev, "b_sess_vld\n");
			if (charger) {
				/* Has charger been detected? If no detect it */
				switch (charger->chg_type) {
				case DWC3_DCP_CHARGER:
				case DWC3_PROPRIETARY_CHARGER:
					dev_dbg(phy->dev, "lpm, DCP charger\n");
					dwc3_otg_set_power(phy,
							DWC3_IDEV_CHG_MAX);
					pm_runtime_put_sync(phy->dev);
					break;
				case DWC3_CDP_CHARGER:
					dwc3_otg_set_power(phy,
							DWC3_IDEV_CHG_MAX);
					dwc3_otg_start_peripheral(&dotg->otg,
								  1);
					phy->state = OTG_STATE_B_PERIPHERAL;
					work = 1;
					break;
				case DWC3_SDP_CHARGER:
					dwc3_otg_start_peripheral(&dotg->otg,
								  1);
					phy->state = OTG_STATE_B_PERIPHERAL;
					pr_info("DWC3_SDP_CHARGER\n");
					work = 1;
					break;
				case DWC3_FLOATED_CHARGER:
					if (dotg->charger_retry_count <
							max_chgr_retry_count)
						dotg->charger_retry_count++;
					/*
					 * In case of floating charger, if
					 * retry count equal to max retry count
					 * notify PMIC about floating charger
					 * and put Hw in low power mode. Else
					 * perform charger detection again by
					 * calling start_detection() with false
					 * and then with true argument.
					 */
					if (dotg->charger_retry_count ==
						max_chgr_retry_count) {
						dwc3_otg_set_power(phy, 0);
						pm_runtime_put_sync(phy->dev);
						break;
					}
					charger->start_detection(dotg->charger,
								 false);
					/* intentional fallthrough: redo
					 * charger detection from scratch */
				default:
					dev_dbg(phy->dev, "chg_det started\n");
					charger->start_detection(charger,
								 true);
					break;
				}
			} else {
				/* no charger registered, start peripheral */
				if (dwc3_otg_start_peripheral(&dotg->otg, 1)) {
					/*
					 * Probably set_peripheral not called
					 * yet. We will re-try as soon as it
					 * will be called
					 */
					dev_err(phy->dev, "enter lpm as\n"
						"unable to start B-device\n");
					phy->state = OTG_STATE_UNDEFINED;
					pm_runtime_put_sync(phy->dev);
					return;
				}
			}
		} else {
			if (charger)
				charger->start_detection(dotg->charger, false);
			dotg->charger_retry_count = 0;
			dwc3_otg_set_power(phy, 0);
			dev_dbg(phy->dev, "No device, trying to suspend\n");
			pm_runtime_put_sync(phy->dev);
		}
		break;
	case OTG_STATE_B_PERIPHERAL:
#ifdef CONFIG_PANTECH_USB_BLOCKING_MDMSTATE
		if (0 < get_pantech_mdm_state())
			dwc3_otg_set_power(phy,
					DWC3_BLOCKING_USB_MDMSTATE_MAX);
#endif
#ifndef CONFIG_PANTECH_SIO_BUG_FIX
		if (!test_bit(B_SESS_VLD, &dotg->inputs) ||
				!test_bit(ID, &dotg->inputs)) {
#else
		if (!test_bit(B_SESS_VLD, &dotg->inputs)) {
#endif
			dev_dbg(phy->dev, "!id || !bsv\n");
			dwc3_otg_start_peripheral(&dotg->otg, 0);
			phy->state = OTG_STATE_B_IDLE;
			if (charger)
				charger->chg_type = DWC3_INVALID_CHARGER;
			work = 1;
		}
		break;
	case OTG_STATE_A_IDLE:
		/* Switch to A-Device*/
		if (test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "id\n");
			phy->state = OTG_STATE_B_IDLE;
			dotg->vbus_retry_count = 0;
			work = 1;
		} else {
			phy->state = OTG_STATE_A_HOST;
			ret = dwc3_otg_start_host(&dotg->otg, 1);
			if ((ret == -EPROBE_DEFER) &&
						dotg->vbus_retry_count < 3) {
				/*
				 * Get regulator failed as regulator driver is
				 * not up yet. Will try to start host after 1sec
				 */
				phy->state = OTG_STATE_A_IDLE;
				dev_dbg(phy->dev,
					"Unable to get vbus regulator. Retrying...\n");
				delay = VBUS_REG_CHECK_DELAY;
				work = 1;
				dotg->vbus_retry_count++;
			} else if (ret) {
				/*
				 * Probably set_host was not called yet.
				 * We will re-try as soon as it will be called
				 */
				dev_dbg(phy->dev, "enter lpm as\n"
					"unable to start A-device\n");
				phy->state = OTG_STATE_A_IDLE;
				pm_runtime_put_sync(phy->dev);
				return;
			}
		}
		break;
	case OTG_STATE_A_HOST:
		if (test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "id\n");
#ifdef CONFIG_PANTECH_SIO_BUG_FIX
			/* FIXME : If OTG cable is disconnecting, below process is must completed
			 * before pm_runtime_suspend.
			 * So we are ignored pm_runtime_suspend request.
			 * LS4-USB tarial
			 */
			pm_runtime_get_noresume(phy->dev);
#endif
			dwc3_otg_start_host(&dotg->otg, 0);
			phy->state = OTG_STATE_B_IDLE;
			dotg->vbus_retry_count = 0;
			work = 1;
#ifdef CONFIG_PANTECH_SIO_BUG_FIX
			pm_runtime_put_noidle(phy->dev);
#endif
		}
		break;
	default:
		dev_err(phy->dev, "%s: invalid otg-state\n", __func__);
	}

	if (work)
		queue_delayed_work(system_nrt_wq, &dotg->sm_work, delay);
}

/**
 * dwc3_otg_reset - reset dwc3 otg registers.
 *
 * @w: Pointer to the dwc3 otg workqueue
 */
static void dwc3_otg_reset(struct dwc3_otg *dotg)
{
	/* OCTL PeriMode must only be forced once per boot */
	static int once;
	struct dwc3_ext_xceiv *ext_xceiv = dotg->ext_xceiv;

	/*
	 * OCFG[2] - OTG-Version = 1
	 * OCFG[1] - HNPCap = 0
	 * OCFG[0] - SRPCap = 0
	 */
	if (ext_xceiv && !ext_xceiv->otg_capability)
		dwc3_writel(dotg->regs, DWC3_OCFG, 0x4);

	/*
	 * OCTL[6] - PeriMode = 1
	 * OCTL[5] - PrtPwrCtl = 0
	 * OCTL[4] - HNPReq = 0
	 * OCTL[3] - SesReq = 0
	 * OCTL[2] - TermSelDLPulse = 0
	 * OCTL[1] - DevSetHNPEn = 0
	 * OCTL[0] - HstSetHNPEn = 0
	 */
	if (!once) {
		if (ext_xceiv && !ext_xceiv->otg_capability)
			dwc3_writel(dotg->regs, DWC3_OCTL, 0x40);
		once++;
	}

	/* Clear all otg events (interrupts) indications */
	dwc3_writel(dotg->regs, DWC3_OEVT, 0xFFFF);

	/* Enable ID/BSV StsChngEn event*/
	if (ext_xceiv && !ext_xceiv->otg_capability)
		dwc3_writel(dotg->regs, DWC3_OEVTEN,
				DWC3_OEVTEN_OTGCONIDSTSCHNGEVNT |
				DWC3_OEVTEN_OTGBDEVVBUSCHNGEVNT);
}

/**
 * dwc3_otg_init - Initializes otg related registers
 * @dwc: Pointer to out controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_otg_init(struct dwc3 *dwc)
{
	u32 reg;
	int ret = 0;
	struct dwc3_otg *dotg;

	dev_dbg(dwc->dev, "dwc3_otg_init\n");

	/*
	 * GHWPARAMS6[10] bit is SRPSupport.
	 * This bit also reflects DWC_USB3_EN_OTG
	 */
	reg = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
	if (!(reg & DWC3_GHWPARAMS6_SRP_SUPPORT)) {
		/*
		 * No OTG support in the HW core.
		 * We return 0 to indicate no error, since this is acceptable
		 * situation, just continue probe the dwc3 driver without otg.
		 */
		dev_dbg(dwc->dev, "dwc3_otg address space is not supported\n");
		return 0;
	}

	/* Allocate and init otg instance */
	dotg = kzalloc(sizeof(struct dwc3_otg), GFP_KERNEL);
	if (!dotg) {
		dev_err(dwc->dev, "unable to allocate dwc3_otg\n");
		return -ENOMEM;
	}

	/* DWC3 has separate IRQ line for OTG events (ID/BSV etc.) */
	dotg->irq = platform_get_irq_byname(to_platform_device(dwc->dev),
					    "otg_irq");
	if (dotg->irq < 0) {
		dev_err(dwc->dev, "%s: missing OTG IRQ\n", __func__);
		ret = -ENODEV;
		goto err1;
	}

	dotg->regs = dwc->regs;

	dotg->otg.set_peripheral = dwc3_otg_set_peripheral;
	dotg->otg.set_host = dwc3_otg_set_host;

	/* This reference is used by dwc3 modules for checking otg existance */
	dwc->dotg = dotg;

	dotg->otg.phy = kzalloc(sizeof(struct usb_phy), GFP_KERNEL);
	if (!dotg->otg.phy) {
		dev_err(dwc->dev, "unable to allocate dwc3_otg.phy\n");
		ret = -ENOMEM;
		goto err1;
	}

	dotg->dwc = dwc;
	dotg->otg.phy->otg = &dotg->otg;
	dotg->otg.phy->dev = dwc->dev;
	dotg->otg.phy->set_power = dwc3_otg_set_power;
	dotg->otg.phy->set_suspend = dwc3_otg_set_suspend;

	ret = usb_set_transceiver(dotg->otg.phy);
	if (ret) {
		dev_err(dotg->otg.phy->dev,
			"%s: failed to set transceiver, already exists\n",
			__func__);
		goto err2;
	}

	dotg->otg.phy->state = OTG_STATE_UNDEFINED;

	init_completion(&dotg->dwc3_xcvr_vbus_init);
	INIT_DELAYED_WORK(&dotg->sm_work, dwc3_otg_sm_work);

	ret = request_irq(dotg->irq, dwc3_otg_interrupt, IRQF_SHARED,
			  "dwc3_otg", dotg);
	if (ret) {
		dev_err(dotg->otg.phy->dev, "failed to request irq #%d --> %d\n",
			dotg->irq, ret);
		goto err3;
	}

	pm_runtime_get(dwc->dev);

	return 0;

err3:
	cancel_delayed_work_sync(&dotg->sm_work);
	usb_set_transceiver(NULL);
err2:
	kfree(dotg->otg.phy);
err1:
	dwc->dotg = NULL;
	kfree(dotg);

	return ret;
}
/*
 * omap_temp_sensor_probe - probe the OMAP4 on-die bandgap temperature sensor.
 *
 * Maps resources, requests the thermal-alert and thermal-shutdown IRQs,
 * enables the sensor, performs a first dummy conversion (hw issue), then
 * exposes the sysfs group. Unwinds in strict reverse order on failure.
 */
static int __devinit omap_temp_sensor_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_temp_sensor_pdata *pdata = pdev->dev.platform_data;
	struct omap_temp_sensor *temp_sensor;
	struct resource *mem;
	int ret = 0, val;

	if (!pdata) {
		dev_err(dev, "%s: platform data missing\n", __func__);
		return -EINVAL;
	}

	temp_sensor = kzalloc(sizeof(struct omap_temp_sensor), GFP_KERNEL);
	if (!temp_sensor)
		return -ENOMEM;

	spin_lock_init(&temp_sensor->lock);

	/* The mem resource is only validated here; register access goes
	 * through pdata->offset (phy_base) below. */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "%s:no mem resource\n", __func__);
		ret = -EINVAL;
		goto plat_res_err;
	}

	temp_sensor->irq = platform_get_irq_byname(pdev, "thermal_alert");
	if (temp_sensor->irq < 0) {
		dev_err(dev, "%s:Cannot get thermal alert irq\n", __func__);
		ret = -EINVAL;
		goto get_irq_err;
	}

	ret = gpio_request_one(OMAP_TSHUT_GPIO, GPIOF_DIR_IN,
			"thermal_shutdown");
	if (ret) {
		dev_err(dev, "%s: Could not get tshut_gpio\n", __func__);
		goto tshut_gpio_req_err;
	}

	temp_sensor->tshut_irq = gpio_to_irq(OMAP_TSHUT_GPIO);
	if (temp_sensor->tshut_irq < 0) {
		dev_err(dev, "%s:Cannot get thermal shutdown irq\n", __func__);
		ret = -EINVAL;
		goto get_tshut_irq_err;
	}

	temp_sensor->phy_base = pdata->offset;
	temp_sensor->pdev = pdev;
	temp_sensor->dev = dev;

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	/*
	 * check if the efuse has a non-zero value if not
	 * it is an untrimmed sample and the temperatures
	 * may not be accurate
	 */
	if (omap_readl(OMAP4_CTRL_MODULE_CORE +
			OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP))
		temp_sensor->is_efuse_valid = 1;

	temp_sensor->clock = clk_get(&temp_sensor->pdev->dev, "fck");
	if (IS_ERR(temp_sensor->clock)) {
		ret = PTR_ERR(temp_sensor->clock);
		pr_err("%s:Unable to get fclk: %d\n", __func__, ret);
		/* NOTE(review): the PTR_ERR value is logged but the function
		 * deliberately reports -EINVAL to the caller. */
		ret = -EINVAL;
		goto clk_get_err;
	}

	/* Init delayed work for throttle decision */
	INIT_DELAYED_WORK(&temp_sensor->throttle_work,
			throttle_delayed_work_fn);

	platform_set_drvdata(pdev, temp_sensor);

	ret = omap_temp_sensor_enable(temp_sensor);
	if (ret) {
		dev_err(dev, "%s:Cannot enable temp sensor\n", __func__);
		goto sensor_enable_err;
	}

	omap_enable_continuous_mode(temp_sensor);
	omap_configure_temp_sensor_thresholds(temp_sensor);
	/* 1 ms */
	omap_configure_temp_sensor_counter(temp_sensor, 1);

	/* Wait till the first conversion is done wait for at least 1ms */
	mdelay(2);

	/* Read the temperature once due to hw issue*/
	omap_read_current_temp(temp_sensor);

	/* Set 2 seconds time as default counter */
	omap_configure_temp_sensor_counter(temp_sensor,
						temp_sensor->clk_rate * 2);

	ret = request_threaded_irq(temp_sensor->irq, NULL,
			omap_talert_irq_handler,
			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			"temp_sensor", (void *)temp_sensor);
	if (ret) {
		dev_err(dev, "Request threaded irq failed.\n");
		goto req_irq_err;
	}

	ret = request_threaded_irq(temp_sensor->tshut_irq, NULL,
			omap_tshut_irq_handler,
			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			"tshut", (void *)temp_sensor);
	if (ret) {
		dev_err(dev, "Request threaded irq failed for TSHUT.\n");
		goto tshut_irq_req_err;
	}

	ret = sysfs_create_group(&pdev->dev.kobj, &omap_temp_sensor_group);
	if (ret) {
		dev_err(&pdev->dev, "could not create sysfs files\n");
		goto sysfs_create_err;
	}

	/* unmask the T_COLD and unmask T_HOT at init */
	val = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
	val |= OMAP4_MASK_COLD_MASK;
	val |= OMAP4_MASK_HOT_MASK;
	omap_temp_sensor_writel(temp_sensor, val, BGAP_CTRL_OFFSET);

	dev_info(dev, "%s probed", pdata->name);

	/* file-scope handle used by the PM callbacks */
	temp_sensor_pm = temp_sensor;

	return 0;

sysfs_create_err:
	free_irq(temp_sensor->tshut_irq, temp_sensor);
	cancel_delayed_work_sync(&temp_sensor->throttle_work);
tshut_irq_req_err:
	free_irq(temp_sensor->irq, temp_sensor);
req_irq_err:
	platform_set_drvdata(pdev, NULL);
	omap_temp_sensor_disable(temp_sensor);
sensor_enable_err:
	clk_put(temp_sensor->clock);
clk_get_err:
	pm_runtime_disable(dev);
get_tshut_irq_err:
	gpio_free(OMAP_TSHUT_GPIO);
tshut_gpio_req_err:
get_irq_err:
plat_res_err:
	kfree(temp_sensor);
	return ret;
}
/*
 * sh_mobile_sdhi_probe - probe one SDHI MMC controller instance.
 *
 * Allocates driver data, hooks up optional platform callbacks and the
 * interface clock, registers the TMIO MMC host, then wires up either the
 * dedicated (named) IRQs or the multiplexed (un-named) IRQ list.
 */
static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
{
	struct sh_mobile_sdhi *priv;
	struct tmio_mmc_data *mmc_data;
	struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
	struct tmio_mmc_host *host;
	char clk_name[8];
	int irq, ret, i = 0;
	bool multiplexed_isr = true;

	priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
	if (priv == NULL) {
		dev_err(&pdev->dev, "kzalloc failed\n");
		return -ENOMEM;
	}

	mmc_data = &priv->mmc_data;

	/*
	 * FIX: 'p' was dereferenced unconditionally here although the rest of
	 * this function (and the eclkget unwind path) treats platform data as
	 * optional; guard the accesses so a missing pointer cannot oops.
	 */
	if (p) {
		p->pdata = mmc_data;
		if (p->init) {
			ret = p->init(pdev, &sdhi_ops);
			if (ret)
				goto einit;
		}
	}

	snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
	priv->clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		ret = PTR_ERR(priv->clk);
		goto eclkget;
	}

	mmc_data->hclk = clk_get_rate(priv->clk);
	mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
	mmc_data->get_cd = sh_mobile_sdhi_get_cd;
	mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
	if (p) {
		mmc_data->flags = p->tmio_flags;
		if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
			mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
		mmc_data->ocr_mask = p->tmio_ocr_mask;
		mmc_data->capabilities |= p->tmio_caps;
		mmc_data->cd_gpio = p->cd_gpio;

		if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
			priv->param_tx.slave_id = p->dma_slave_tx;
			priv->param_rx.slave_id = p->dma_slave_rx;
			priv->dma_priv.chan_priv_tx = &priv->param_tx;
			priv->dma_priv.chan_priv_rx = &priv->param_rx;
			priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
			mmc_data->dma = &priv->dma_priv;
		}
	}

	/*
	 * All SDHI blocks support 2-byte and larger block sizes in 4-bit
	 * bus width mode.
	 */
	mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;

	/*
	 * All SDHI blocks support SDIO IRQ signalling.
	 */
	mmc_data->flags |= TMIO_MMC_SDIO_IRQ;

	ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
	if (ret < 0)
		goto eprobe;

	/*
	 * Allow one or more specific (named) ISRs or
	 * one or more multiplexed (un-named) ISRs.
	 */

	irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
	if (irq >= 0) {
		multiplexed_isr = false;
		ret = request_irq(irq, tmio_mmc_card_detect_irq, 0,
				  dev_name(&pdev->dev), host);
		if (ret)
			goto eirq_card_detect;
	}

	irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
	if (irq >= 0) {
		multiplexed_isr = false;
		ret = request_irq(irq, tmio_mmc_sdio_irq, 0,
				  dev_name(&pdev->dev), host);
		if (ret)
			goto eirq_sdio;
	}

	irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD);
	if (irq >= 0) {
		multiplexed_isr = false;
		ret = request_irq(irq, tmio_mmc_sdcard_irq, 0,
				  dev_name(&pdev->dev), host);
		if (ret)
			goto eirq_sdcard;
	} else if (!multiplexed_isr) {
		dev_err(&pdev->dev,
			"Principal SD-card IRQ is missing among named interrupts\n");
		ret = irq;
		goto eirq_sdcard;
	}

	if (multiplexed_isr) {
		while (1) {
			irq = platform_get_irq(pdev, i);
			if (irq < 0)
				break;
			i++;
			ret = request_irq(irq, tmio_mmc_irq, 0,
					  dev_name(&pdev->dev), host);
			if (ret)
				goto eirq_multiplexed;
		}

		/* There must be at least one IRQ source */
		if (!i) {
			/*
			 * FIX: the original jumped into the unwind with ret
			 * still 0, so probe reported success with no IRQ
			 * wired up; propagate the platform_get_irq() error.
			 */
			ret = irq;
			goto eirq_multiplexed;
		}
	}

	dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
		 mmc_hostname(host->mmc), (unsigned long)
		 (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
		 mmc_data->hclk / 1000000);

	return ret;

eirq_multiplexed:
	while (i--) {
		irq = platform_get_irq(pdev, i);
		free_irq(irq, host);
	}
eirq_sdcard:
	irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
	if (irq >= 0)
		free_irq(irq, host);
eirq_sdio:
	irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
	if (irq >= 0)
		free_irq(irq, host);
eirq_card_detect:
	tmio_mmc_host_remove(host);
eprobe:
	clk_put(priv->clk);
eclkget:
	if (p && p->cleanup)
		p->cleanup(pdev);
einit:
	kfree(priv);
	return ret;
}
static int da9063_regulator_probe(struct platform_device *pdev) { struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent); struct da9063_pdata *da9063_pdata = dev_get_platdata(da9063->dev); struct of_regulator_match *da9063_reg_matches = NULL; struct da9063_regulators_pdata *regl_pdata; const struct da9063_dev_model *model; struct da9063_regulators *regulators; struct da9063_regulator *regl; struct regulator_config config; bool bcores_merged, bmem_bio_merged; int id, irq, n, n_regulators, ret, val; size_t size; regl_pdata = da9063_pdata ? da9063_pdata->regulators_pdata : NULL; if (!regl_pdata) regl_pdata = da9063_parse_regulators_dt(pdev, &da9063_reg_matches); if (IS_ERR(regl_pdata) || regl_pdata->n_regulators == 0) { dev_err(&pdev->dev, "No regulators defined for the platform\n"); return PTR_ERR(regl_pdata); } /* Find regulators set for particular device model */ for (model = regulators_models; model->regulator_info; model++) { if (model->dev_model == da9063->model) break; } if (!model->regulator_info) { dev_err(&pdev->dev, "Chip model not recognised (%u)\n", da9063->model); return -ENODEV; } ret = regmap_read(da9063->regmap, DA9063_REG_CONFIG_H, &val); if (ret < 0) { dev_err(&pdev->dev, "Error while reading BUCKs configuration\n"); return ret; } bcores_merged = val & DA9063_BCORE_MERGE; bmem_bio_merged = val & DA9063_BUCK_MERGE; n_regulators = model->n_regulators; if (bcores_merged) n_regulators -= 2; /* remove BCORE1, BCORE2 */ else n_regulators--; /* remove BCORES_MERGED */ if (bmem_bio_merged) n_regulators -= 2; /* remove BMEM, BIO */ else n_regulators--; /* remove BMEM_BIO_MERGED */ /* Allocate memory required by usable regulators */ size = sizeof(struct da9063_regulators) + n_regulators * sizeof(struct da9063_regulator); regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); if (!regulators) return -ENOMEM; regulators->n_regulators = n_regulators; platform_set_drvdata(pdev, regulators); /* Register all regulators declared in platform information */ n = 
0; id = 0; while (n < regulators->n_regulators) { /* Skip regulator IDs depending on merge mode configuration */ switch (id) { case DA9063_ID_BCORE1: case DA9063_ID_BCORE2: if (bcores_merged) { id++; continue; } break; case DA9063_ID_BMEM: case DA9063_ID_BIO: if (bmem_bio_merged) { id++; continue; } break; case DA9063_ID_BCORES_MERGED: if (!bcores_merged) { id++; continue; } break; case DA9063_ID_BMEM_BIO_MERGED: if (!bmem_bio_merged) { id++; continue; } break; } /* Initialise regulator structure */ regl = ®ulators->regulator[n]; regl->hw = da9063; regl->info = &model->regulator_info[id]; regl->desc = regl->info->desc; regl->desc.type = REGULATOR_VOLTAGE; regl->desc.owner = THIS_MODULE; if (regl->info->mode.reg) regl->mode = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->mode); if (regl->info->suspend.reg) regl->suspend = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->suspend); if (regl->info->sleep.reg) regl->sleep = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->sleep); if (regl->info->suspend_sleep.reg) regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->suspend_sleep); if (regl->info->ilimit.reg) regl->ilimit = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->ilimit); /* Register regulator */ memset(&config, 0, sizeof(config)); config.dev = &pdev->dev; config.init_data = da9063_get_regulator_initdata(regl_pdata, id); config.driver_data = regl; if (da9063_reg_matches) config.of_node = da9063_reg_matches[id].of_node; config.regmap = da9063->regmap; regl->rdev = devm_regulator_register(&pdev->dev, ®l->desc, &config); if (IS_ERR(regl->rdev)) { dev_err(&pdev->dev, "Failed to register %s regulator\n", regl->desc.name); return PTR_ERR(regl->rdev); } id++; n++; } /* LDOs overcurrent event support */ irq = platform_get_irq_byname(pdev, "LDO_LIM"); if (irq < 0) { dev_err(&pdev->dev, "Failed to get IRQ.\n"); return irq; } ret = devm_request_threaded_irq(&pdev->dev, irq, 
NULL, da9063_ldo_lim_event, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "LDO_LIM", regulators); if (ret) { dev_err(&pdev->dev, "Failed to request LDO_LIM IRQ.\n"); return ret; } return 0; }
/*
 * pmic_usb_det_drv_probe - probe PMIC-based USB cable/ID detection.
 *
 * Allocates per-device state (devm), acquires the USB2 PHY, resolves the
 * four detection interrupts from the device tree, then performs the
 * initial detection sequence: VBUS presence, USBID and ACA detection.
 * Returns 0 on success or a negative errno; the final return propagates
 * device_init_wakeup()'s status (failure there is only logged).
 */
static int pmic_usb_det_drv_probe(struct platform_device *pdev)
{
	struct pmic_usb_det_data *pdata;
	struct usb_phy *otg_handle;
	int ret;

	if (!pdev)
		return -EINVAL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(struct pmic_usb_det_data),
			GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "%s: out of memory\n", __func__);
		return -ENOMEM;
	}

	/* -1 / -ENXIO sentinels mark "not yet detected / not acquired". */
	pdata->pdev = &pdev->dev;
	pdata->vbus_state = -1;
	pdata->cable_type = POWER_SUPPLY_CHARGER_TYPE_NONE;
	pdata->usbid_state = -1;
	pdata->irq_ctyp = -ENXIO;
	pdata->irq_vbusdet = -ENXIO;
	pdata->irqwake_vbusdet = false;
	pdata->irqwake_ctyp = false;
	pdata->irqwake_idflt = false;
	pdata->irqwake_idgnd = false;
	platform_set_drvdata(pdev, pdata);

	/* Get USB phy */
	otg_handle = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
	if (IS_ERR_OR_NULL(otg_handle)) {
		dev_err(&pdev->dev, "%s: fail to get OTG transceiver\n",
			__func__);
		return -EBUSY;
	}
	pdata->otg_handle = otg_handle;

	/* Get interrupt from device tree */
	pdata->irq_vbusdet = platform_get_irq_byname(pdev, "vbusdet");
	pdata->irq_ctyp = platform_get_irq_byname(pdev, "ctype");
	pdata->irq_idflt = platform_get_irq_byname(pdev, "usbidflt");
	pdata->irq_idgnd = platform_get_irq_byname(pdev, "usbidgnd");
	/* All four lines are mandatory; any failure aborts the probe. */
	if (IS_ERR_VALUE(pdata->irq_vbusdet) || IS_ERR_VALUE(pdata->irq_ctyp)
		|| IS_ERR_VALUE(pdata->irq_idflt)
		|| IS_ERR_VALUE(pdata->irq_idgnd)) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"%s: can't get irq, vbus=%d ctyp=%d idflt=%d idgnd=%d\n",
			__func__, pdata->irq_vbusdet, pdata->irq_ctyp,
			pdata->irq_idflt, pdata->irq_idgnd);
		return ret;
	}

	ret = pmic_usb_setup_det_irq(pdata);
	if (ret)
		return ret;

	/* Disable the internal detection circuit */
	ret = pmic_usb_enable_usb_det(pdata, false);
	if (ret)
		return ret;

	/* Read the VBUS presence status for initial update, and trigger
	   USB type detection if applicable */
	ret = pmic_usb_vbus_det(pdata);
	if (ret)
		return ret;

	/* Enable USBID and ACA detection */
	ret = pmic_usb_enable_id_aca_det(pdata, true, true);
	if (ret)
		return ret;

	/* Read the USBID and ACA detection result */
	ret = pmic_usb_id_det(pdata, false, false);
	if (ret)
		return ret;

	ret = device_init_wakeup(&pdev->dev, true);
	if (ret)
		dev_err(&pdev->dev, "%s: can't set wakeup src\n", __func__);

	return ret;
}
static int rockchip_vpu_probe(struct platform_device *pdev) { const struct of_device_id *match; struct rockchip_vpu_dev *vpu; struct resource *res; int i, ret; vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL); if (!vpu) return -ENOMEM; vpu->dev = &pdev->dev; vpu->pdev = pdev; mutex_init(&vpu->vpu_mutex); spin_lock_init(&vpu->irqlock); match = of_match_node(of_rockchip_vpu_match, pdev->dev.of_node); vpu->variant = match->data; INIT_DELAYED_WORK(&vpu->watchdog_work, rockchip_vpu_watchdog); for (i = 0; i < vpu->variant->num_clocks; i++) vpu->clocks[i].id = vpu->variant->clk_names[i]; ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks, vpu->clocks); if (ret) return ret; res = platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0); vpu->base = devm_ioremap_resource(vpu->dev, res); if (IS_ERR(vpu->base)) return PTR_ERR(vpu->base); vpu->enc_base = vpu->base + vpu->variant->enc_offset; ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(vpu->dev, "Could not set DMA coherent mask.\n"); return ret; } if (vpu->variant->vepu_irq) { int irq; irq = platform_get_irq_byname(vpu->pdev, "vepu"); if (irq <= 0) { dev_err(vpu->dev, "Could not get vepu IRQ.\n"); return -ENXIO; } ret = devm_request_irq(vpu->dev, irq, vpu->variant->vepu_irq, 0, dev_name(vpu->dev), vpu); if (ret) { dev_err(vpu->dev, "Could not request vepu IRQ.\n"); return ret; } } ret = vpu->variant->init(vpu); if (ret) { dev_err(&pdev->dev, "Failed to init VPU hardware\n"); return ret; } pm_runtime_set_autosuspend_delay(vpu->dev, 100); pm_runtime_use_autosuspend(vpu->dev); pm_runtime_enable(vpu->dev); ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks); if (ret) { dev_err(&pdev->dev, "Failed to prepare clocks\n"); return ret; } ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev); if (ret) { dev_err(&pdev->dev, "Failed to register v4l2 device\n"); goto err_clk_unprepare; } platform_set_drvdata(pdev, vpu); vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops); if 
(IS_ERR(vpu->m2m_dev)) { v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(vpu->m2m_dev); goto err_v4l2_unreg; } vpu->mdev.dev = vpu->dev; strlcpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model)); media_device_init(&vpu->mdev); vpu->v4l2_dev.mdev = &vpu->mdev; ret = rockchip_vpu_video_device_register(vpu); if (ret) { dev_err(&pdev->dev, "Failed to register encoder\n"); goto err_m2m_rel; } ret = media_device_register(&vpu->mdev); if (ret) { v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n"); goto err_video_dev_unreg; } return 0; err_video_dev_unreg: if (vpu->vfd_enc) { video_unregister_device(vpu->vfd_enc); video_device_release(vpu->vfd_enc); } err_m2m_rel: v4l2_m2m_release(vpu->m2m_dev); err_v4l2_unreg: v4l2_device_unregister(&vpu->v4l2_dev); err_clk_unprepare: clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks); pm_runtime_disable(vpu->dev); return ret; }
/* ap9540_c2c_probe - probe C2C ressources */ static int ap9540_c2c_probe(struct platform_device *pdev) { struct c2c_platform_data *pdata = pdev->dev.platform_data; struct ap9540_c2c *c2c, *c2c_alloc; struct resource *res; int ret = 0, i; int align = 0x3f; if (c2c_dev) return -EBUSY; /* alocate c2c structure, with 64byte address alignement */ c2c = kzalloc(sizeof(*c2c) + align, GFP_KERNEL); if (c2c == NULL) return -ENOMEM; c2c_alloc =\ (struct ap9540_c2c *) PTR_ALIGN((struct ap9540_c2c *)c2c, align); c2c_dev_align = (int)(c2c_alloc - c2c); c2c = (struct ap9540_c2c *) c2c_alloc; /* init c2c structure */ c2c->dev = &pdev->dev; c2c->pdev = pdev; mutex_init(&c2c->lock); for (i = 31; i >= 0; i--) c2c->genio[i].irq = -1; c2c->irq1 = -1; c2c->wumod_gpio = -1; INIT_WORK(&c2c->ipc_ready_work, ipc_ready_work); INIT_WORK(&c2c->caif_ready_work, caif_ready_work); INIT_WORK(&c2c->protection_work, protection_work); init_timer(&c2c->powerup_timer); c2c->powerup_timeout_ms = 1000; c2c->powerup_timer.function = powerup_timer_elapsed; c2c->powerup_timer.data = (unsigned long) c2c; init_waitqueue_head(&c2c->waitq); /* PRCMU and C2C registers base adresses */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "C2C_REGS"); if (!res) { ret = -ENODEV; goto err; } c2c->c2c_base = ioremap(res->start, res->end - res->start + 1); if (c2c->c2c_base == NULL) { ret = -EIO; goto err; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "PRCMU_REGS"); if (!res) { ret = -ENODEV; goto err; } c2c->prcm_base = ioremap(res->start, res->end - res->start + 1); if (c2c->prcm_base == NULL) { ret = -EIO; goto err; } /* GPIO WU_MOD */ if (!pdata) { dev_err(&pdev->dev, "invalid c2c platform data\n"); goto err; } c2c->wumod_gpio = pdata->wumod_gpio; ret = gpio_request(c2c->wumod_gpio, "WU_MOD"); if (ret < 0) { c2c->wumod_gpio = -1; goto err; } ret = gpio_direction_output(c2c->wumod_gpio, 1); if (ret < 0) goto err; /* C2C IRQ[1], required for IPC_READY/CAIF_READY */ c2c->irq1 = 
platform_get_irq_byname(c2c->pdev, "C2C_IRQ1"); if (c2c->irq1 < 0) { ret = -ENODEV; goto err; } ret = devm_request_threaded_irq(c2c->dev, c2c->irq1, c2c_irq1_top, c2c_irq1_bottom, IRQF_NO_SUSPEND, "C2C_IRQ1", c2c); if (ret) goto err; ret = ap9540_c2c_debugfs(c2c); if (ret) goto err; ret = ap9540_c2c_deep_debug_init(c2c); if (ret) goto err; /* GIC do not treat GENO lines as IRQs */ MASK_GENO(0xFFFFFFFF, c2c->prcm_base); /* register GENIO driver to genio wrapper */ ret = genio_register_driver(&ap9540_c2c_genio_apis); if (ret) goto err; /* Register PRCMU fmw driver services */ prcmu_register_modem("c2c"); upap_register_notifier(UPAP_NFYID_C2C_NOTIF, &c2c_powerup_nb); /* * TODO: register to the several c2c reset/crash notification * once prcmu driver is ready. Single handler: c2c_reset_nb. */ c2c->no_power_down = 1; /* FIXME: to be removed useless */ c2c_dev = c2c; platform_set_drvdata(pdev, c2c); dev_info(c2c_dev->dev, "probed\n"); return ret; err: if (c2c) { ap9540_c2c_clear(c2c); kfree(((void *) c2c) - c2c_dev_align); c2c_dev_align = 0; } return ret; }
static __devinit int wm831x_buckv_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = wm831x->dev->platform_data; int id = pdev->id % ARRAY_SIZE(pdata->dcdc); struct wm831x_dcdc *dcdc; struct resource *res; int ret, irq; dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); if (pdata == NULL || pdata->dcdc[id] == NULL) return -ENODEV; dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL); if (dcdc == NULL) { dev_err(&pdev->dev, "Unable to allocate private data\n"); return -ENOMEM; } dcdc->wm831x = wm831x; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (res == NULL) { dev_err(&pdev->dev, "No I/O resource\n"); ret = -EINVAL; goto err; } dcdc->base = res->start; snprintf(dcdc->name, sizeof(dcdc->name), "DCDC%d", id + 1); dcdc->desc.name = dcdc->name; dcdc->desc.id = id; dcdc->desc.type = REGULATOR_VOLTAGE; dcdc->desc.n_voltages = WM831X_BUCKV_MAX_SELECTOR + 1; dcdc->desc.ops = &wm831x_buckv_ops; dcdc->desc.owner = THIS_MODULE; ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG); if (ret < 0) { dev_err(wm831x->dev, "Failed to read ON VSEL: %d\n", ret); goto err; } dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK; ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG); if (ret < 0) { dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret); goto err; } dcdc->dvs_vsel = ret & WM831X_DC1_DVS_VSEL_MASK; if (pdata->dcdc[id]) wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data); dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev, pdata->dcdc[id], dcdc); if (IS_ERR(dcdc->regulator)) { ret = PTR_ERR(dcdc->regulator); dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n", id + 1, ret); goto err; } irq = platform_get_irq_byname(pdev, "UV"); ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq, IRQF_TRIGGER_RISING, dcdc->name, dcdc); if (ret != 0) { dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", irq, ret); goto err_regulator; } irq = 
platform_get_irq_byname(pdev, "HC"); ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_oc_irq, IRQF_TRIGGER_RISING, dcdc->name, dcdc); if (ret != 0) { dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n", irq, ret); goto err_uv; } platform_set_drvdata(pdev, dcdc); return 0; err_uv: wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); err_regulator: regulator_unregister(dcdc->regulator); err: if (dcdc->dvs_gpio) gpio_free(dcdc->dvs_gpio); kfree(dcdc); return ret; }
static int ehci_msm2_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct resource *res; struct msm_hcd *mhcd; struct pinctrl_state *set_state; const struct msm_usb_host_platform_data *pdata; char pdev_name[PDEV_NAME_LEN]; int ret; dev_dbg(&pdev->dev, "ehci_msm2 probe\n"); /* * Fail probe in case of uicc till userspace activates driver through * sysfs entry. */ if (!uicc_card_present && pdev->dev.of_node && of_property_read_bool( pdev->dev.of_node, "qcom,usb2-enable-uicc")) return -ENODEV; hcd = usb_create_hcd(&ehci_msm2_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { dev_err(&pdev->dev, "Unable to create HCD\n"); return -ENOMEM; } mhcd = hcd_to_mhcd(hcd); mhcd->dev = &pdev->dev; mhcd->xo_clk = clk_get(&pdev->dev, "xo"); if (IS_ERR(mhcd->xo_clk)) { ret = PTR_ERR(mhcd->xo_clk); mhcd->xo_clk = NULL; if (ret == -EPROBE_DEFER) goto put_hcd; } ret = msm_ehci_init_clocks(mhcd, 1); if (ret) goto xo_put; if (pdev->dev.of_node) { dev_dbg(&pdev->dev, "device tree enabled\n"); pdev->dev.platform_data = ehci_msm2_dt_to_pdata(pdev); } if (!pdev->dev.platform_data) dev_dbg(&pdev->dev, "No platform data given\n"); pdata = pdev->dev.platform_data; if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &ehci_msm_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); hcd_to_bus(hcd)->skip_resume = true; hcd->irq = platform_get_irq(pdev, 0); if (hcd->irq < 0) { dev_err(&pdev->dev, "Unable to get IRQ resource\n"); ret = hcd->irq; goto deinit_clocks; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Unable to get memory resource\n"); ret = -ENODEV; goto deinit_clocks; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto deinit_clocks; } spin_lock_init(&mhcd->wakeup_lock); mhcd->async_irq = platform_get_irq_byname(pdev, "async_irq"); if (mhcd->async_irq 
< 0) { dev_dbg(&pdev->dev, "platform_get_irq for async_int failed\n"); mhcd->async_irq = 0; } else { ret = request_irq(mhcd->async_irq, msm_async_irq, IRQF_TRIGGER_RISING, "msm_ehci_host", mhcd); if (ret) { dev_err(&pdev->dev, "request irq failed (ASYNC INT)\n"); goto unmap; } disable_irq(mhcd->async_irq); } snprintf(pdev_name, PDEV_NAME_LEN, "%s.%d", pdev->name, pdev->id); if (mhcd->xo_clk) ret = clk_prepare_enable(mhcd->xo_clk); if (ret) { dev_err(&pdev->dev, "%s failed to vote for TCXO %d\n", __func__, ret); goto free_xo_handle; } /* Get pinctrl if target uses pinctrl */ mhcd->hsusb_pinctrl = devm_pinctrl_get(&pdev->dev); if (IS_ERR(mhcd->hsusb_pinctrl)) { if (of_property_read_bool(pdev->dev.of_node, "pinctrl-names")) { dev_err(&pdev->dev, "Error encountered while getting pinctrl"); ret = PTR_ERR(mhcd->hsusb_pinctrl); goto devote_xo_handle; } pr_debug("Target does not use pinctrl\n"); mhcd->hsusb_pinctrl = NULL; } if (mhcd->hsusb_pinctrl) { set_state = pinctrl_lookup_state(mhcd->hsusb_pinctrl, "ehci_active"); if (IS_ERR(set_state)) { pr_err("cannot get hsusb pinctrl active state\n"); ret = PTR_ERR(set_state); goto devote_xo_handle; } ret = pinctrl_select_state(mhcd->hsusb_pinctrl, set_state); if (ret) { pr_err("cannot set hsusb pinctrl active state\n"); goto devote_xo_handle; } } if (pdata && gpio_is_valid(pdata->resume_gpio)) { mhcd->resume_gpio = pdata->resume_gpio; ret = devm_gpio_request(&pdev->dev, mhcd->resume_gpio, "hsusb_resume"); if (ret) { dev_err(&pdev->dev, "resume gpio(%d) request failed:%d\n", mhcd->resume_gpio, ret); mhcd->resume_gpio = -EINVAL; } else { /* to override ehci_bus_resume from ehci-hcd library */ ehci_bus_resume_func = ehci_msm2_hc_driver.bus_resume; ehci_msm2_hc_driver.bus_resume = msm_ehci_bus_resume_with_gpio; } } if (pdata && gpio_is_valid(pdata->ext_hub_reset_gpio)) { ret = devm_gpio_request(&pdev->dev, pdata->ext_hub_reset_gpio, "hsusb_reset"); if (ret) { dev_err(&pdev->dev, "reset gpio(%d) request failed:%d\n", 
pdata->ext_hub_reset_gpio, ret); goto pinctrl_sleep; } else { /* reset external hub */ gpio_direction_output(pdata->ext_hub_reset_gpio, 0); /* * Hub reset should be asserted for minimum 5microsec * before deasserting. */ usleep_range(5, 1000); gpio_direction_output(pdata->ext_hub_reset_gpio, 1); } } spin_lock_init(&mhcd->wakeup_lock); ret = msm_ehci_init_vddcx(mhcd, 1); if (ret) { dev_err(&pdev->dev, "unable to initialize VDDCX\n"); ret = -ENODEV; goto pinctrl_sleep; } ret = msm_ehci_config_vddcx(mhcd, 1); if (ret) { dev_err(&pdev->dev, "hsusb vddcx configuration failed\n"); goto deinit_vddcx; } ret = msm_ehci_ldo_init(mhcd, 1); if (ret) { dev_err(&pdev->dev, "hsusb vreg configuration failed\n"); goto deinit_vddcx; } ret = msm_ehci_ldo_enable(mhcd, 1); if (ret) { dev_err(&pdev->dev, "hsusb vreg enable failed\n"); goto deinit_ldo; } ret = msm_ehci_init_vbus(mhcd, 1); if (ret) goto disable_ldo; hcd->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0); if (IS_ERR(hcd->phy)) { if (PTR_ERR(hcd->phy) == -EPROBE_DEFER) { dev_dbg(&pdev->dev, "usb-phy not probed yet\n"); ret = -EPROBE_DEFER; goto vbus_deinit; } hcd->phy = NULL; } if (hcd->phy) { usb_phy_init(hcd->phy); /* Set Host mode flag */ hcd->phy->flags |= PHY_HOST_MODE; } else if (pdata && pdata->use_sec_phy) { mhcd->usb_phy_ctrl_reg = USB_PHY_CTRL2; } else { mhcd->usb_phy_ctrl_reg = USB_PHY_CTRL; } ret = msm_hsusb_reset(mhcd); if (ret) { dev_err(&pdev->dev, "hsusb PHY initialization failed\n"); goto vbus_deinit; } ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED); if (ret) { dev_err(&pdev->dev, "unable to register HCD\n"); goto vbus_deinit; } mhcd->bus_scale_table = msm_bus_cl_get_pdata(pdev); if (!mhcd->bus_scale_table) { dev_dbg(&pdev->dev, "bus scaling is disabled\n"); } else { mhcd->bus_perf_client = msm_bus_scale_register_client(mhcd->bus_scale_table); ret = msm_bus_scale_client_update_request( mhcd->bus_perf_client, 1); if (ret) dev_err(&pdev->dev, "Failed to vote for bus scaling\n"); } pdata = 
mhcd->dev->platform_data; if (pdata && (!pdata->dock_connect_irq || !irq_read_line(pdata->dock_connect_irq))) msm_ehci_vbus_power(mhcd, 1); /* For peripherals directly conneted to downstream port of root hub * and require to drive suspend and resume by controller driver instead * of root hub. */ if (pdata) mhcd->ehci.no_selective_suspend = pdata->no_selective_suspend; mhcd->wakeup_irq = platform_get_irq_byname(pdev, "wakeup_irq"); if (mhcd->wakeup_irq > 0) { dev_dbg(&pdev->dev, "wakeup irq:%d\n", mhcd->wakeup_irq); irq_set_status_flags(mhcd->wakeup_irq, IRQ_NOAUTOEN); ret = request_irq(mhcd->wakeup_irq, msm_hsusb_wakeup_irq, IRQF_TRIGGER_HIGH, "msm_hsusb_wakeup", mhcd); if (ret) { dev_err(&pdev->dev, "request_irq(%d) failed:%d\n", mhcd->wakeup_irq, ret); mhcd->wakeup_irq = 0; } } else { mhcd->wakeup_irq = 0; } device_init_wakeup(&pdev->dev, 1); wakeup_source_init(&mhcd->ws, dev_name(&pdev->dev)); pm_stay_awake(mhcd->dev); INIT_WORK(&mhcd->phy_susp_fail_work, msm_ehci_phy_susp_fail_work); /* * This pdev->dev is assigned parent of root-hub by USB core, * hence, runtime framework automatically calls this driver's * runtime APIs based on root-hub's state. 
*/ /* configure pmic_gpio_irq for D+ change */ if (pdata && pdata->pmic_gpio_dp_irq) mhcd->pmic_gpio_dp_irq = pdata->pmic_gpio_dp_irq; if (mhcd->pmic_gpio_dp_irq) { ret = request_threaded_irq(mhcd->pmic_gpio_dp_irq, NULL, msm_ehci_host_wakeup_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "msm_ehci_host_wakeup", mhcd); if (!ret) { disable_irq_nosync(mhcd->pmic_gpio_dp_irq); } else { dev_err(&pdev->dev, "request_irq(%d) failed: %d\n", mhcd->pmic_gpio_dp_irq, ret); mhcd->pmic_gpio_dp_irq = 0; } } if (pdata && pdata->pm_qos_latency) pm_qos_add_request(&mhcd->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY, pdata->pm_qos_latency + 1); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); if (ehci_debugfs_init(mhcd) < 0) dev_err(mhcd->dev, "%s: debugfs init failed\n", __func__); return 0; vbus_deinit: msm_ehci_init_vbus(mhcd, 0); disable_ldo: msm_ehci_ldo_enable(mhcd, 0); deinit_ldo: msm_ehci_ldo_init(mhcd, 0); deinit_vddcx: msm_ehci_init_vddcx(mhcd, 0); pinctrl_sleep: if (mhcd->hsusb_pinctrl) { set_state = pinctrl_lookup_state(mhcd->hsusb_pinctrl, "ehci_sleep"); if (IS_ERR(set_state)) pr_err("cannot get hsusb pinctrl sleep state\n"); else pinctrl_select_state(mhcd->hsusb_pinctrl, set_state); } devote_xo_handle: if (mhcd->xo_clk) clk_disable_unprepare(mhcd->xo_clk); free_xo_handle: if (mhcd->xo_clk) { clk_put(mhcd->xo_clk); mhcd->xo_clk = NULL; } if (mhcd->async_irq) free_irq(mhcd->async_irq, mhcd); unmap: iounmap(hcd->regs); deinit_clocks: msm_ehci_init_clocks(mhcd, 0); xo_put: if (mhcd->xo_clk) clk_put(mhcd->xo_clk); put_hcd: usb_put_hcd(hcd); return ret; }
static __devinit int wm831x_boostp_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = wm831x->dev->platform_data; int id = pdev->id % ARRAY_SIZE(pdata->dcdc); struct wm831x_dcdc *dcdc; struct resource *res; int ret, irq; dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); if (pdata == NULL || pdata->dcdc[id] == NULL) return -ENODEV; dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL); if (dcdc == NULL) { dev_err(&pdev->dev, "Unable to allocate private data\n"); return -ENOMEM; } dcdc->wm831x = wm831x; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (res == NULL) { dev_err(&pdev->dev, "No I/O resource\n"); ret = -EINVAL; goto err; } dcdc->base = res->start; snprintf(dcdc->name, sizeof(dcdc->name), "DCDC%d", id + 1); dcdc->desc.name = dcdc->name; dcdc->desc.id = id; dcdc->desc.type = REGULATOR_VOLTAGE; dcdc->desc.ops = &wm831x_boostp_ops; dcdc->desc.owner = THIS_MODULE; dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev, pdata->dcdc[id], dcdc); if (IS_ERR(dcdc->regulator)) { ret = PTR_ERR(dcdc->regulator); dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n", id + 1, ret); goto err; } irq = platform_get_irq_byname(pdev, "UV"); ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq, IRQF_TRIGGER_RISING, dcdc->name, dcdc); if (ret != 0) { dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", irq, ret); goto err_regulator; } platform_set_drvdata(pdev, dcdc); return 0; err_regulator: regulator_unregister(dcdc->regulator); err: kfree(dcdc); return ret; }
static int msm_iommu_probe(struct platform_device *pdev) { struct resource *r; struct clk *iommu_clk; struct clk *iommu_pclk; struct msm_iommu_drvdata *drvdata; struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; void __iomem *regs_base; int ret, irq, par; if (pdev->id == -1) { msm_iommu_root_dev = pdev; return 0; } drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL); if (!drvdata) { ret = -ENOMEM; goto fail; } if (!iommu_dev) { ret = -ENODEV; goto fail; } iommu_pclk = clk_get(NULL, "smmu_pclk"); if (IS_ERR(iommu_pclk)) { ret = -ENODEV; goto fail; } ret = clk_prepare_enable(iommu_pclk); if (ret) goto fail_enable; iommu_clk = clk_get(&pdev->dev, "iommu_clk"); if (!IS_ERR(iommu_clk)) { if (clk_get_rate(iommu_clk) == 0) clk_set_rate(iommu_clk, 1); ret = clk_prepare_enable(iommu_clk); if (ret) { clk_put(iommu_clk); goto fail_pclk; } } else iommu_clk = NULL; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase"); regs_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(regs_base)) { ret = PTR_ERR(regs_base); goto fail_clk; } irq = platform_get_irq_byname(pdev, "secure_irq"); if (irq < 0) { ret = -ENODEV; goto fail_clk; } msm_iommu_reset(regs_base, iommu_dev->ncb); SET_M(regs_base, 0, 1); SET_PAR(regs_base, 0, 0); SET_V2PCFG(regs_base, 0, 1); SET_V2PPR(regs_base, 0, 0); par = GET_PAR(regs_base, 0); SET_V2PCFG(regs_base, 0, 0); SET_M(regs_base, 0, 0); if (!par) { pr_err("%s: Invalid PAR value detected\n", iommu_dev->name); ret = -ENODEV; goto fail_clk; } ret = request_irq(irq, msm_iommu_fault_handler, 0, "msm_iommu_secure_irpt_handler", drvdata); if (ret) { pr_err("Request IRQ %d failed with ret=%d\n", irq, ret); goto fail_clk; } drvdata->pclk = iommu_pclk; drvdata->clk = iommu_clk; drvdata->base = regs_base; drvdata->irq = irq; drvdata->ncb = iommu_dev->ncb; pr_info("device %s mapped at %p, irq %d with %d ctx banks\n", iommu_dev->name, regs_base, irq, iommu_dev->ncb); platform_set_drvdata(pdev, drvdata); if (iommu_clk) clk_disable(iommu_clk); 
clk_disable(iommu_pclk); return 0; fail_clk: if (iommu_clk) { clk_disable(iommu_clk); clk_put(iommu_clk); } fail_pclk: clk_disable_unprepare(iommu_pclk); fail_enable: clk_put(iommu_pclk); fail: kfree(drvdata); return ret; }
int cmc220_init_modemctl_device(struct modem_ctl *mc, struct modem_data *pdata) { int ret = 0; int irq = 0; unsigned long flag = 0; struct platform_device *pdev = NULL; mc->gpio_cp_on = pdata->gpio_cp_on; mc->gpio_cp_off = pdata->gpio_cp_off; mc->gpio_cp_reset = pdata->gpio_cp_reset; mc->gpio_phone_active = pdata->gpio_phone_active; /*TODO: check the GPIO map*/ mc->gpio_pda_active = pdata->gpio_pda_active; mc->gpio_cp_dump_int = pdata->gpio_cp_dump_int; mc->gpio_flm_uart_sel = pdata->gpio_flm_uart_sel; mc->gpio_slave_wakeup = pdata->gpio_slave_wakeup; mc->gpio_host_active = pdata->gpio_host_active; mc->gpio_host_wakeup = pdata->gpio_host_wakeup; if (!mc->gpio_cp_on || !mc->gpio_cp_reset || !mc->gpio_phone_active) { mif_err("no GPIO data\n"); return -ENXIO; } gpio_set_value(mc->gpio_cp_reset, 0); gpio_set_value(mc->gpio_cp_on, 0); if (mc->gpio_cp_off) gpio_set_value(mc->gpio_cp_off, 1); cmc220_get_ops(mc); dev_set_drvdata(mc->dev, mc); INIT_DELAYED_WORK(&mc->dwork, mc_work); pdev = to_platform_device(mc->dev); mc->irq_phone_active = platform_get_irq_byname(pdev, "lte_phone_active"); if (!mc->irq_phone_active) { mif_err("get irq fail\n"); return -1; } irq = mc->irq_phone_active; mif_err("PHONE_ACTIVE IRQ# = %d\n", irq); flag = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND; /* flag |= IRQF_NO_SUSPEND; */ ret = request_irq(irq, phone_active_handler, flag, "cmc_active", mc); if (ret) { mif_err("request_irq fail (%d)\n", ret); return ret; } /* disable_irq(irq); */ ret = enable_irq_wake(irq); if (ret) mif_err("enable_irq_wake fail (%d)\n", ret); mif_err("IRQ#%d handler is registered.\n", irq); return 0; }
static int vlynq_probe(struct platform_device *pdev) { struct vlynq_device *dev; struct resource *regs_res, *mem_res, *irq_res; int len, result; regs_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); if (!regs_res) return -ENODEV; mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); if (!mem_res) return -ENODEV; irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "devirq"); if (!irq_res) return -ENODEV; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { printk(KERN_ERR "vlynq: failed to allocate device structure\n"); return -ENOMEM; } dev->id = pdev->id; dev->dev.bus = &vlynq_bus_type; dev->dev.parent = &pdev->dev; dev_set_name(&dev->dev, "vlynq%d", dev->id); dev->dev.platform_data = pdev->dev.platform_data; dev->dev.release = vlynq_device_release; dev->regs_start = regs_res->start; dev->regs_end = regs_res->end; dev->mem_start = mem_res->start; dev->mem_end = mem_res->end; len = resource_size(regs_res); if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) { printk(KERN_ERR "%s: Can't request vlynq registers\n", dev_name(&dev->dev)); result = -ENXIO; goto fail_request; } dev->local = ioremap(regs_res->start, len); if (!dev->local) { printk(KERN_ERR "%s: Can't remap vlynq registers\n", dev_name(&dev->dev)); result = -ENXIO; goto fail_remap; } dev->remote = (struct vlynq_regs *)((void *)dev->local + VLYNQ_REMOTE_OFFSET); dev->irq = platform_get_irq_byname(pdev, "irq"); dev->irq_start = irq_res->start; dev->irq_end = irq_res->end; dev->local_irq = dev->irq_end - dev->irq_start; dev->remote_irq = dev->local_irq - 1; if (device_register(&dev->dev)) goto fail_register; platform_set_drvdata(pdev, dev); printk(KERN_INFO "%s: regs 0x%p, irq %d, mem 0x%p\n", dev_name(&dev->dev), (void *)dev->regs_start, dev->irq, (void *)dev->mem_start); dev->dev_id = 0; dev->divisor = vlynq_div_auto; result = __vlynq_enable_device(dev); if (result == 0) { dev->dev_id = readl(&dev->remote->chip); ((struct plat_vlynq_ops 
*)(dev->dev.platform_data))->off(dev); } if (dev->dev_id) printk(KERN_INFO "Found a VLYNQ device: %08x\n", dev->dev_id); return 0; fail_register: iounmap(dev->local); fail_remap: fail_request: release_mem_region(regs_res->start, len); kfree(dev); return result; }
static __devinit int wm831x_aldo_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = wm831x->dev->platform_data; int id; struct wm831x_ldo *ldo; struct resource *res; int ret, irq; if (pdata && pdata->wm831x_num) id = (pdata->wm831x_num * 10) + 1; else id = 0; id = pdev->id - id; dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); if (pdata == NULL || pdata->ldo[id] == NULL) return -ENODEV; ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL); if (ldo == NULL) { dev_err(&pdev->dev, "Unable to allocate private data\n"); return -ENOMEM; } ldo->wm831x = wm831x; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (res == NULL) { dev_err(&pdev->dev, "No I/O resource\n"); ret = -EINVAL; goto err; } ldo->base = res->start; snprintf(ldo->name, sizeof(ldo->name), "LDO%d", id + 1); ldo->desc.name = ldo->name; ldo->desc.id = id; ldo->desc.type = REGULATOR_VOLTAGE; ldo->desc.n_voltages = WM831X_ALDO_MAX_SELECTOR + 1; ldo->desc.ops = &wm831x_aldo_ops; ldo->desc.owner = THIS_MODULE; ldo->regulator = regulator_register(&ldo->desc, &pdev->dev, pdata->ldo[id], ldo); if (IS_ERR(ldo->regulator)) { ret = PTR_ERR(ldo->regulator); dev_err(wm831x->dev, "Failed to register LDO%d: %d\n", id + 1, ret); goto err; } irq = platform_get_irq_byname(pdev, "UV"); ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq, IRQF_TRIGGER_RISING, ldo->name, ldo); if (ret != 0) { dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", irq, ret); goto err_regulator; } platform_set_drvdata(pdev, ldo); return 0; err_regulator: regulator_unregister(ldo->regulator); err: kfree(ldo); return ret; }
static int ehci_msm2_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct resource *res; struct msm_hcd *mhcd; const struct msm_usb_host_platform_data *pdata; char pdev_name[PDEV_NAME_LEN]; int ret; dev_dbg(&pdev->dev, "ehci_msm2 probe\n"); hcd = usb_create_hcd(&ehci_msm2_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { dev_err(&pdev->dev, "Unable to create HCD\n"); return -ENOMEM; } mhcd = hcd_to_mhcd(hcd); mhcd->dev = &pdev->dev; mhcd->xo_clk = clk_get(&pdev->dev, "xo"); if (IS_ERR(mhcd->xo_clk)) { ret = PTR_ERR(mhcd->xo_clk); mhcd->xo_clk = NULL; if (ret == -EPROBE_DEFER) goto put_hcd; } ret = msm_ehci_init_clocks(mhcd, 1); if (ret) goto xo_put; if (pdev->dev.of_node) { dev_dbg(&pdev->dev, "device tree enabled\n"); pdev->dev.platform_data = ehci_msm2_dt_to_pdata(pdev); } if (!pdev->dev.platform_data) dev_dbg(&pdev->dev, "No platform data given\n"); pdata = pdev->dev.platform_data; if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &ehci_msm_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); hcd_to_bus(hcd)->skip_resume = true; hcd->irq = platform_get_irq(pdev, 0); if (hcd->irq < 0) { dev_err(&pdev->dev, "Unable to get IRQ resource\n"); ret = hcd->irq; goto deinit_clocks; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Unable to get memory resource\n"); ret = -ENODEV; goto deinit_clocks; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto deinit_clocks; } spin_lock_init(&mhcd->wakeup_lock); mhcd->async_irq = platform_get_irq_byname(pdev, "async_irq"); if (mhcd->async_irq < 0) { dev_dbg(&pdev->dev, "platform_get_irq for async_int failed\n"); mhcd->async_irq = 0; } else { ret = request_irq(mhcd->async_irq, msm_async_irq, IRQF_TRIGGER_RISING, "msm_ehci_host", mhcd); if (ret) { dev_err(&pdev->dev, "request irq failed (ASYNC 
INT)\n"); goto unmap; } disable_irq(mhcd->async_irq); } snprintf(pdev_name, PDEV_NAME_LEN, "%s.%d", pdev->name, pdev->id); if (mhcd->xo_clk) { ret = clk_prepare_enable(mhcd->xo_clk); } else { mhcd->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, pdev_name); if (IS_ERR(mhcd->xo_handle)) { dev_err(&pdev->dev, "%s fail to get handle for X0 D0\n", __func__); ret = PTR_ERR(mhcd->xo_handle); mhcd->xo_handle = NULL; goto free_async_irq; } else { ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON); } } if (ret) { dev_err(&pdev->dev, "%s failed to vote for TCXO %d\n", __func__, ret); goto free_xo_handle; } if (pdata && gpio_is_valid(pdata->resume_gpio)) { mhcd->resume_gpio = pdata->resume_gpio; ret = devm_gpio_request(&pdev->dev, mhcd->resume_gpio, "hsusb_resume"); if (ret) { dev_err(&pdev->dev, "resume gpio(%d) request failed:%d\n", mhcd->resume_gpio, ret); mhcd->resume_gpio = -EINVAL; } else { /* to override ehci_bus_resume from ehci-hcd library */ ehci_bus_resume_func = ehci_msm2_hc_driver.bus_resume; ehci_msm2_hc_driver.bus_resume = msm_ehci_bus_resume_with_gpio; } } if (pdata && gpio_is_valid(pdata->ext_hub_reset_gpio)) { ret = devm_gpio_request(&pdev->dev, pdata->ext_hub_reset_gpio, "hsusb_reset"); if (ret) { dev_err(&pdev->dev, "reset gpio(%d) request failed:%d\n", pdata->ext_hub_reset_gpio, ret); goto devote_xo_handle; } else { /* reset external hub */ gpio_direction_output(pdata->ext_hub_reset_gpio, 0); /* * Hub reset should be asserted for minimum 5microsec * before deasserting. 
*/ usleep_range(5, 1000); gpio_direction_output(pdata->ext_hub_reset_gpio, 1); } } spin_lock_init(&mhcd->wakeup_lock); ret = msm_ehci_init_vddcx(mhcd, 1); if (ret) { dev_err(&pdev->dev, "unable to initialize VDDCX\n"); ret = -ENODEV; goto devote_xo_handle; } ret = msm_ehci_config_vddcx(mhcd, 1); if (ret) { dev_err(&pdev->dev, "hsusb vddcx configuration failed\n"); goto devote_xo_handle; } ret = msm_ehci_ldo_init(mhcd, 1); if (ret) { dev_err(&pdev->dev, "hsusb vreg configuration failed\n"); goto deinit_vddcx; } ret = msm_ehci_ldo_enable(mhcd, 1); if (ret) { dev_err(&pdev->dev, "hsusb vreg enable failed\n"); goto deinit_ldo; } ret = msm_ehci_init_vbus(mhcd, 1); if (ret) { dev_err(&pdev->dev, "unable to get vbus\n"); goto disable_ldo; } if (pdata && pdata->use_sec_phy) mhcd->usb_phy_ctrl_reg = USB_PHY_CTRL2; else mhcd->usb_phy_ctrl_reg = USB_PHY_CTRL; ret = msm_hsusb_reset(mhcd); if (ret) { dev_err(&pdev->dev, "hsusb PHY initialization failed\n"); goto vbus_deinit; } ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED); if (ret) { dev_err(&pdev->dev, "unable to register HCD\n"); goto vbus_deinit; } pdata = mhcd->dev->platform_data; if (pdata && (!pdata->dock_connect_irq || !irq_read_line(pdata->dock_connect_irq))) msm_ehci_vbus_power(mhcd, 1); /* For peripherals directly conneted to downstream port of root hub * and require to drive suspend and resume by controller driver instead * of root hub. 
*/ if (pdata) mhcd->ehci.no_selective_suspend = pdata->no_selective_suspend; mhcd->wakeup_irq = platform_get_irq_byname(pdev, "wakeup_irq"); if (mhcd->wakeup_irq > 0) { dev_dbg(&pdev->dev, "wakeup irq:%d\n", mhcd->wakeup_irq); irq_set_status_flags(mhcd->wakeup_irq, IRQ_NOAUTOEN); ret = request_irq(mhcd->wakeup_irq, msm_hsusb_wakeup_irq, IRQF_TRIGGER_HIGH, "msm_hsusb_wakeup", mhcd); if (ret) { dev_err(&pdev->dev, "request_irq(%d) failed:%d\n", mhcd->wakeup_irq, ret); mhcd->wakeup_irq = 0; } } else { mhcd->wakeup_irq = 0; } device_init_wakeup(&pdev->dev, 1); wakeup_source_init(&mhcd->ws, dev_name(&pdev->dev)); pm_stay_awake(mhcd->dev); INIT_WORK(&mhcd->phy_susp_fail_work, msm_ehci_phy_susp_fail_work); /* * This pdev->dev is assigned parent of root-hub by USB core, * hence, runtime framework automatically calls this driver's * runtime APIs based on root-hub's state. */ /* configure pmic_gpio_irq for D+ change */ if (pdata && pdata->pmic_gpio_dp_irq) mhcd->pmic_gpio_dp_irq = pdata->pmic_gpio_dp_irq; if (mhcd->pmic_gpio_dp_irq) { ret = request_threaded_irq(mhcd->pmic_gpio_dp_irq, NULL, msm_ehci_host_wakeup_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "msm_ehci_host_wakeup", mhcd); if (!ret) { disable_irq_nosync(mhcd->pmic_gpio_dp_irq); } else { dev_err(&pdev->dev, "request_irq(%d) failed: %d\n", mhcd->pmic_gpio_dp_irq, ret); mhcd->pmic_gpio_dp_irq = 0; } } pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); if (ehci_debugfs_init(mhcd) < 0) dev_err(mhcd->dev, "%s: debugfs init failed\n", __func__); return 0; vbus_deinit: msm_ehci_init_vbus(mhcd, 0); disable_ldo: msm_ehci_ldo_enable(mhcd, 0); deinit_ldo: msm_ehci_ldo_init(mhcd, 0); deinit_vddcx: msm_ehci_init_vddcx(mhcd, 0); devote_xo_handle: if (mhcd->xo_clk) clk_disable_unprepare(mhcd->xo_clk); else msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF); free_xo_handle: if (mhcd->xo_clk) { clk_put(mhcd->xo_clk); mhcd->xo_clk = NULL; } else { msm_xo_put(mhcd->xo_handle); } free_async_irq: if 
(mhcd->async_irq) free_irq(mhcd->async_irq, mhcd); unmap: iounmap(hcd->regs); deinit_clocks: msm_ehci_init_clocks(mhcd, 0); xo_put: if (mhcd->xo_clk) clk_put(mhcd->xo_clk); put_hcd: usb_put_hcd(hcd); return ret; }
/*
 * hisi_pmic_ocp_mntn_initial() - set up PMIC over-current (OCP) monitoring.
 *
 * Reads the OCP register list and per-register event descriptions from the
 * device tree, clears any pending OCP state, and installs the "ocp" IRQ
 * with a dedicated workqueue for deferred handling.  All allocations are
 * devm-managed.  Returns 0 on success or a negative errno.
 */
static int hisi_pmic_ocp_mntn_initial(struct platform_device *pdev, PMIC_MNTN_DESC *pmic_mntn)
{
	struct device_node *root = NULL;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	PMIC_EXCH_REG *exch_reg_tmp;
	unsigned int index, bit;
	s32 ret = 0;
	char compatible_string[PMIC_DTS_ATTR_LEN] = {0};

	ret = of_property_read_u32(np, "hisilicon,ocp-reg-num",
				   (u32 *)&pmic_mntn->ocp_reg_n);
	if (ret) {
		dev_err(dev, "[%s]get ocp-reg-num attribute failed.\n", __func__);
		return -ENODEV;
	}

	pmic_mntn->ocp_regs = (u32 *)devm_kzalloc(dev,
			sizeof(u32)*pmic_mntn->ocp_reg_n, GFP_KERNEL);
	if (!pmic_mntn->ocp_regs) {
		dev_err(dev, "[%s]kzalloc ocp_regs buffer failed.\n", __func__);
		return -ENOMEM;
	}

	ret = of_property_read_u32_array(np, "hisilicon,ocp-regs",
			pmic_mntn->ocp_regs, pmic_mntn->ocp_reg_n);
	if (ret) {
		dev_err(dev, "[%s]get ocp-regs attribute failed.\n", __func__);
		return -ENODEV;
	}

	pmic_mntn->ocp_exch_desc = (PMIC_EXCH_REG *)devm_kzalloc(dev,
			sizeof(PMIC_EXCH_REG)*pmic_mntn->ocp_reg_n, GFP_KERNEL);
	if (!pmic_mntn->ocp_exch_desc) {
		dev_err(dev, "[%s]kzalloc ocp_exch_desc buff failed.\n", __func__);
		return -ENOMEM;
	}

	/* One DT node per monitored register describes its event bits. */
	exch_reg_tmp = pmic_mntn->ocp_exch_desc;
	for (index = 0; index < pmic_mntn->ocp_reg_n; index++) {
		snprintf(compatible_string, PMIC_DTS_ATTR_LEN,
			 "hisilicon,pmic-mntn-ocp-reg0x%0x",
			 pmic_mntn->ocp_regs[index]);
		/*
		 * NOTE(review): of_find_compatible_node() drops a reference
		 * on its "from" argument (np here) — confirm np's refcount
		 * is handled by the caller.
		 */
		root = of_find_compatible_node(np, NULL, compatible_string);
		if (!root) {
			dev_err(dev, "[%s]no %s root node.\n", __func__,
				compatible_string);
			return -ENODEV;
		}

		exch_reg_tmp[index].event_bit_name = (char **)devm_kzalloc(dev,
				pmic_mntn->data_width*sizeof(char *), GFP_KERNEL);
		if (NULL == exch_reg_tmp[index].event_bit_name) {
			dev_err(dev, "[%s]devm_kzalloc event_bit_name error.\n",
				__func__);
			of_node_put(root);
			return -ENOMEM;
		}

		exch_reg_tmp[index].event_ops_reg = (u32 *)devm_kzalloc(dev,
				pmic_mntn->data_width*sizeof(u32), GFP_KERNEL);
		if (NULL == exch_reg_tmp[index].event_ops_reg) {
			dev_err(dev, "[%s]devm_kzalloc event_ops_reg error.\n",
				__func__);
			of_node_put(root);
			return -ENOMEM;
		}

		ret = of_property_read_u32(root, "hisilicon,inacceptable-event",
				(u32 *)&exch_reg_tmp[index].inacceptable_event);
		for (bit = 0; bit < pmic_mntn->data_width; bit++) {
			ret |= of_property_read_string_index(root,
					"hisilicon,event-bit-name", bit,
					(const char **)&exch_reg_tmp[index].event_bit_name[bit]);
		}
		ret |= of_property_read_u32_array(root, "hisilicon,event-ops-reg",
				(u32 *)exch_reg_tmp[index].event_ops_reg,
				pmic_mntn->data_width);

		/* FIX: drop the reference taken by of_find_compatible_node();
		 * the original leaked one node reference per iteration. */
		of_node_put(root);

		if (ret) {
			dev_err(dev, "[%s]read attribute of %s.\n", __func__,
				compatible_string);
			return -ENODEV;
		}
	}

	pmic_mntn->ocp_event_buff = (u32 *)devm_kzalloc(dev,
			sizeof(u32)*pmic_mntn->ocp_reg_n, GFP_KERNEL);
	if (!pmic_mntn->ocp_event_buff) {
		dev_err(dev, "[%s]kzalloc ocp_event_buff failed.\n", __func__);
		return -ENOMEM;
	}

	hisi_pmic_clear_ocp(pmic_mntn);

	pmic_mntn->ocp_irq = platform_get_irq_byname(pdev, "ocp");
	if (pmic_mntn->ocp_irq < 0) {
		dev_err(dev, "[%s] platform_get_irq_byname ocp failed.\n", __func__);
		return -ENODEV;
	}

	/* FIX: workqueue creation can fail; the original never checked and
	 * would oops on the first queue_work() from the IRQ handler. */
	pmic_mntn->ocp_wq = create_singlethread_workqueue("pmu-ocp-wq");
	if (!pmic_mntn->ocp_wq) {
		dev_err(dev, "[%s]create pmu-ocp-wq failed.\n", __func__);
		return -ENOMEM;
	}
	INIT_WORK(&pmic_mntn->ocp_wk, hisi_pmic_ocp_wq_handler);

	ret = devm_request_irq(dev, pmic_mntn->ocp_irq,
			hisi_pmic_ocp_irq_handler,
			IRQF_DISABLED|IRQF_NO_SUSPEND, "pmu-ocp-irq",
			(void *)pmic_mntn);
	if (ret) {
		dev_err(dev, "[%s]request_irq ocp_irq failed.\n", __func__);
		return -ENODEV;
	}

	return 0;
}
/*
 * qpnp_usbdetect_probe() - detect VBUS presence via a dedicated PMIC IRQ
 * and report it to the "usb" power supply.
 *
 * Defers until the "usb" power supply exists, optionally enables a VIN
 * regulator, installs the VBUS-detect interrupt as a wake source and
 * reports the initial VBUS line state.  Returns 0 or a negative errno.
 */
static int qpnp_usbdetect_probe(struct platform_device *pdev)
{
	struct qpnp_usbdetect *usb;
	struct power_supply *usb_psy;
	int rc, vbus;
	unsigned long flags;

	/* The "usb" power supply must be registered first; defer otherwise. */
	usb_psy = power_supply_get_by_name("usb");
	if (!usb_psy) {
		dev_dbg(&pdev->dev, "USB power_supply not found, deferring probe\n");
		return -EPROBE_DEFER;
	}

	usb = devm_kzalloc(&pdev->dev, sizeof(*usb), GFP_KERNEL);
	if (!usb)
		return -ENOMEM;

	usb->pdev = pdev;
	usb->usb_psy = usb_psy;

	/* VIN regulator is optional: only looked up if the DT names one. */
	if (of_get_property(pdev->dev.of_node, "vin-supply", NULL)) {
		usb->vin = devm_regulator_get(&pdev->dev, "vin");
		if (IS_ERR(usb->vin)) {
			dev_err(&pdev->dev, "Failed to get VIN regulator: %ld\n",
				PTR_ERR(usb->vin));
			return PTR_ERR(usb->vin);
		}
	}

	/* usb was kzalloc'd, so usb->vin is NULL when no supply was given. */
	if (usb->vin) {
		rc = regulator_enable(usb->vin);
		if (rc) {
			dev_err(&pdev->dev, "Failed to enable VIN regulator: %d\n",
				rc);
			return rc;
		}
	}

	usb->vbus_det_irq = platform_get_irq_byname(pdev, "vbus_det_irq");
	if (usb->vbus_det_irq < 0) {
		/* undo the regulator enable before bailing out */
		if (usb->vin)
			regulator_disable(usb->vin);
		return usb->vbus_det_irq;
	}

	/* Trigger on both edges so connect and disconnect are seen. */
	rc = devm_request_irq(&pdev->dev, usb->vbus_det_irq,
			      qpnp_usbdetect_vbus_irq,
			      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			      "vbus_det_irq", usb);
	if (rc) {
		dev_err(&pdev->dev, "request for vbus_det_irq failed: %d\n",
			rc);
		if (usb->vin)
			regulator_disable(usb->vin);
		return rc;
	}

	/* Keep VBUS detection armed across suspend. */
	enable_irq_wake(usb->vbus_det_irq);
	dev_set_drvdata(&pdev->dev, usb);

	/* Read and report initial VBUS state */
	local_irq_save(flags);
	vbus = !!irq_read_line(usb->vbus_det_irq);
	local_irq_restore(flags);
	power_supply_set_present(usb->usb_psy, vbus);

	return 0;
}
static int msm_iommu_probe(struct platform_device *pdev) { struct resource *r, *r2; struct clk *iommu_clk = NULL; struct clk *iommu_pclk = NULL; struct msm_iommu_drvdata *drvdata; struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; void __iomem *regs_base; resource_size_t len; int ret, irq, par; if (pdev->id == -1) { msm_iommu_root_dev = pdev; return 0; } drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL); if (!drvdata) { ret = -ENOMEM; goto fail; } if (!iommu_dev) { ret = -ENODEV; goto fail; } iommu_pclk = clk_get(NULL, "smmu_pclk"); if (IS_ERR(iommu_pclk)) { ret = -ENODEV; goto fail; } ret = clk_enable(iommu_pclk); if (ret) goto fail_enable; iommu_clk = clk_get(&pdev->dev, "iommu_clk"); if (!IS_ERR(iommu_clk)) { if (clk_get_rate(iommu_clk) == 0) clk_set_min_rate(iommu_clk, 1); ret = clk_enable(iommu_clk); if (ret) { clk_put(iommu_clk); goto fail_pclk; } } else iommu_clk = NULL; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase"); if (!r) { ret = -ENODEV; goto fail_clk; } len = r->end - r->start + 1; r2 = request_mem_region(r->start, len, r->name); if (!r2) { pr_err("Could not request memory region: start=%p, len=%d\n", (void *) r->start, len); ret = -EBUSY; goto fail_clk; } regs_base = ioremap(r2->start, len); if (!regs_base) { pr_err("Could not ioremap: start=%p, len=%d\n", (void *) r2->start, len); ret = -EBUSY; goto fail_mem; } irq = platform_get_irq_byname(pdev, "secure_irq"); if (irq < 0) { ret = -ENODEV; goto fail_io; } msm_iommu_reset(regs_base, iommu_dev->ncb); SET_M(regs_base, 0, 1); SET_PAR(regs_base, 0, 0); SET_V2PCFG(regs_base, 0, 1); SET_V2PPR(regs_base, 0, 0); mb(); par = GET_PAR(regs_base, 0); SET_V2PCFG(regs_base, 0, 0); SET_M(regs_base, 0, 0); mb(); if (!par) { pr_err("%s: Invalid PAR value detected\n", iommu_dev->name); ret = -ENODEV; goto fail_io; } ret = request_irq(irq, msm_iommu_fault_handler, 0, "msm_iommu_secure_irpt_handler", drvdata); if (ret) { pr_err("Request IRQ %d failed with ret=%d\n", irq, ret); goto fail_io; } 
drvdata->pclk = iommu_pclk; drvdata->clk = iommu_clk; drvdata->base = regs_base; drvdata->irq = irq; drvdata->ncb = iommu_dev->ncb; pr_info("device %s mapped at %p, irq %d with %d ctx banks\n", iommu_dev->name, regs_base, irq, iommu_dev->ncb); platform_set_drvdata(pdev, drvdata); if (iommu_clk) clk_disable(iommu_clk); clk_disable(iommu_pclk); return 0; fail_io: iounmap(regs_base); fail_mem: release_mem_region(r->start, len); fail_clk: if (iommu_clk) { clk_disable(iommu_clk); clk_put(iommu_clk); } fail_pclk: clk_disable(iommu_pclk); fail_enable: clk_put(iommu_pclk); fail: kfree(drvdata); return ret; }
/*
 * dpram_create_link_device() - build a DPRAM-based modem link device.
 *
 * Allocates the link-device structure, wires up its operation callbacks,
 * maps the DPRAM region (unless the platform already supplied a mapping),
 * initializes locks/completions/tasklets and RX buffer pools, and finally
 * registers the DPRAM interrupt handler.
 *
 * Returns the embedded struct link_device on success, NULL on any failure.
 *
 * NOTE(review): the error path frees only dpld->buff and dpld itself; an
 * ioremap_nocache() mapping, rxbq pools, the wake lock and tasklets created
 * before a later failure are not torn down here — confirm whether callers
 * treat a NULL return as fatal (leak acceptable) or this needs full unwind.
 */
struct link_device *dpram_create_link_device(struct platform_device *pdev)
{
	struct modem_data *mdm_data = NULL;
	struct dpram_link_device *dpld = NULL;
	struct link_device *ld = NULL;
	struct resource *res = NULL;
	resource_size_t res_size;
	struct modemlink_dpram_control *dpctl = NULL;
	unsigned long task_data;
	int ret = 0;
	int i = 0;
	int bsize;
	int qsize;

	/* Get the platform data */
	mdm_data = (struct modem_data *)pdev->dev.platform_data;
	if (!mdm_data) {
		mif_info("ERR! mdm_data == NULL\n");
		goto err;
	}
	mif_info("modem = %s\n", mdm_data->name);
	mif_info("link device = %s\n", mdm_data->link_name);

	if (!mdm_data->dpram_ctl) {
		mif_info("ERR! mdm_data->dpram_ctl == NULL\n");
		goto err;
	}
	dpctl = mdm_data->dpram_ctl;

	/* Alloc DPRAM link device structure */
	dpld = kzalloc(sizeof(struct dpram_link_device), GFP_KERNEL);
	if (!dpld) {
		mif_info("ERR! kzalloc dpld fail\n");
		goto err;
	}
	ld = &dpld->ld;
	task_data = (unsigned long)dpld;

	/* Retrieve modem data and DPRAM control data from the modem data */
	ld->mdm_data = mdm_data;
	ld->name = mdm_data->link_name;
	ld->ipc_version = mdm_data->ipc_version;

	/* Retrieve the most basic data for IPC from the modem data */
	dpld->dpctl = dpctl;
	dpld->dp_type = dpctl->dp_type;

	/* Pre-SIPC5 firmware must supply max_ipc_dev and alignment itself. */
	if (mdm_data->ipc_version < SIPC_VER_50) {
		if (!dpctl->max_ipc_dev) {
			mif_info("ERR! no max_ipc_dev\n");
			goto err;
		}
		ld->aligned = dpctl->aligned;
		dpld->max_ipc_dev = dpctl->max_ipc_dev;
	} else {
		ld->aligned = 1;
		dpld->max_ipc_dev = MAX_SIPC5_DEV;
	}

	/* Set attributes as a link device */
	ld->init_comm = dpram_link_init;
	ld->terminate_comm = dpram_link_terminate;
	ld->send = dpram_send;
	ld->force_dump = dpram_force_dump;
	ld->dump_start = dpram_dump_start;
	ld->dump_update = dpram_dump_update;
	ld->ioctl = dpram_ioctl;

	INIT_LIST_HEAD(&ld->list);

	skb_queue_head_init(&ld->sk_fmt_tx_q);
	skb_queue_head_init(&ld->sk_raw_tx_q);
	skb_queue_head_init(&ld->sk_rfs_tx_q);
	ld->skb_txq[IPC_FMT] = &ld->sk_fmt_tx_q;
	ld->skb_txq[IPC_RAW] = &ld->sk_raw_tx_q;
	ld->skb_txq[IPC_RFS] = &ld->sk_rfs_tx_q;

	/* Set up function pointers */
	dpram_setup_common_op(dpld);
	dpld->dpram_dump = dpram_dump_memory;
	dpld->ext_op = dpram_get_ext_op(mdm_data->modem_type);

	/* Retrieve DPRAM resource (map it only if not already provided) */
	if (!dpctl->dp_base) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			mif_info("%s: ERR! platform_get_resource fail\n",
				ld->name);
			goto err;
		}
		res_size = resource_size(res);

		/* NOTE(review): ioremap_nocache() result is not checked;
		 * a NULL mapping would be dereferenced later — confirm. */
		dpctl->dp_base = ioremap_nocache(res->start, res_size);
		dpctl->dp_size = res_size;
	}
	dpld->dp_base = dpctl->dp_base;
	dpld->dp_size = dpctl->dp_size;

	mif_info("%s: dp_type %d, aligned %d, dp_base 0x%08X, dp_size %d\n",
		ld->name, dpld->dp_type, ld->aligned, (int)dpld->dp_base,
		dpld->dp_size);

	/* Initialize DPRAM map (physical map -> logical map) */
	ret = dpram_table_init(dpld);
	if (ret < 0) {
		mif_info("%s: ERR! dpram_table_init fail (err %d)\n",
			ld->name, ret);
		goto err;
	}

	/* Disable IPC */
	set_magic(dpld, 0);
	set_access(dpld, 0);
	dpld->dpram_init_status = DPRAM_INIT_STATE_NONE;

	/* Initialize locks, completions, and bottom halves */
	snprintf(dpld->wlock_name, DP_MAX_NAME_LEN, "%s_wlock", ld->name);
	wake_lock_init(&dpld->wlock, WAKE_LOCK_SUSPEND, dpld->wlock_name);

	init_completion(&dpld->dpram_init_cmd);
	init_completion(&dpld->modem_pif_init_done);
	init_completion(&dpld->udl_start_complete);
	init_completion(&dpld->udl_cmd_complete);
	init_completion(&dpld->crash_start_complete);
	init_completion(&dpld->dump_start_complete);
	init_completion(&dpld->dump_recv_done);

	tasklet_init(&dpld->rx_tsk, dpram_ipc_rx_task, task_data);
	if (dpld->ext_op && dpld->ext_op->dl_task)
		tasklet_init(&dpld->dl_tsk, dpld->ext_op->dl_task, task_data);

	/* Prepare RXB queue */
	qsize = DPRAM_MAX_RXBQ_SIZE;
	for (i = 0; i < dpld->max_ipc_dev; i++) {
		bsize = rxbq_get_page_size(get_rx_buff_size(dpld, i));
		dpld->rxbq[i].size = qsize;
		dpld->rxbq[i].in = 0;
		dpld->rxbq[i].out = 0;
		dpld->rxbq[i].rxb = rxbq_create_pool(bsize, qsize);
		if (!dpld->rxbq[i].rxb) {
			mif_info("%s: ERR! %s rxbq_create_pool fail\n",
				ld->name, get_dev_name(i));
			goto err;
		}
		mif_info("%s: %s rxbq_pool created (bsize:%d, qsize:%d)\n",
			ld->name, get_dev_name(i), bsize, qsize);
	}

	/* Prepare a multi-purpose miscellaneous buffer */
	dpld->buff = kzalloc(dpld->dp_size, GFP_KERNEL);
	if (!dpld->buff) {
		mif_info("%s: ERR! kzalloc dpld->buff fail\n", ld->name);
		goto err;
	}

	/* Retrieve DPRAM IRQ GPIO# */
	dpld->gpio_dpram_int = mdm_data->gpio_dpram_int;

	/* Retrieve DPRAM IRQ# */
	if (!dpctl->dpram_irq) {
		dpctl->dpram_irq = platform_get_irq_byname(pdev, "dpram_irq");
		/* NOTE(review): this "< 0" test only works if dpram_irq is a
		 * signed type — verify the field's declaration. */
		if (dpctl->dpram_irq < 0) {
			mif_info("%s: ERR! platform_get_irq_byname fail\n",
				ld->name);
			goto err;
		}
	}
	dpld->irq = dpctl->dpram_irq;

	/* Retrieve DPRAM IRQ flags */
	if (!dpctl->dpram_irq_flags)
		dpctl->dpram_irq_flags = (IRQF_NO_SUSPEND | IRQF_TRIGGER_LOW);
	dpld->irq_flags = dpctl->dpram_irq_flags;

	/* Register DPRAM interrupt handler */
	snprintf(dpld->irq_name, DP_MAX_NAME_LEN, "%s_irq", ld->name);
	ret = dpram_register_isr(dpld->irq, dpram_irq_handler, dpld->irq_flags,
			dpld->irq_name, dpld);
	if (ret)
		goto err;

	return ld;

err:
	if (dpld) {
		if (dpld->buff)
			kfree(dpld->buff);
		kfree(dpld);
	}
	return NULL;
}
/**
 * dwc3_otg_init - Initializes otg related registers
 * @dwc: Pointer to out controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_otg_init(struct dwc3 *dwc)
{
	u32 reg;
	int ret = 0;
	struct dwc3_otg *dotg;

	dev_dbg(dwc->dev, "dwc3_otg_init\n");

	/*
	 * GHWPARAMS6[10] bit is SRPSupport.
	 * This bit also reflects DWC_USB3_EN_OTG
	 */
	reg = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
	if (!(reg & DWC3_GHWPARAMS6_SRP_SUPPORT)) {
		/*
		 * No OTG support in the HW core.
		 * We return 0 to indicate no error, since this is acceptable
		 * situation, just continue probe the dwc3 driver without otg.
		 */
		dev_dbg(dwc->dev, "dwc3_otg address space is not supported\n");
		return 0;
	}

	/* Allocate and init otg instance */
	dotg = kzalloc(sizeof(struct dwc3_otg), GFP_KERNEL);
	if (!dotg) {
		dev_err(dwc->dev, "unable to allocate dwc3_otg\n");
		return -ENOMEM;
	}

	/* DWC3 has separate IRQ line for OTG events (ID/BSV etc.) */
	dotg->irq = platform_get_irq_byname(to_platform_device(dwc->dev),
								"otg_irq");
	if (dotg->irq < 0) {
		dev_err(dwc->dev, "%s: missing OTG IRQ\n", __func__);
		ret = -ENODEV;
		goto err1;
	}

	dotg->regs = dwc->regs;

	dotg->otg.set_peripheral = dwc3_otg_set_peripheral;
	dotg->otg.set_host = dwc3_otg_set_host;

	/* This reference is used by dwc3 modules for checking otg existance */
	dwc->dotg = dotg;

	dotg->otg.phy = kzalloc(sizeof(struct usb_phy), GFP_KERNEL);
	if (!dotg->otg.phy) {
		dev_err(dwc->dev, "unable to allocate dwc3_otg.phy\n");
		ret = -ENOMEM;
		/* err1 also clears dwc->dotg set just above */
		goto err1;
	}

	dotg->dwc = dwc;
	dotg->otg.phy->otg = &dotg->otg;
	dotg->otg.phy->dev = dwc->dev;
	dotg->otg.phy->set_power = dwc3_otg_set_power;
	dotg->otg.phy->set_suspend = dwc3_otg_set_suspend;

	/* Publish the transceiver; fails if one is already registered. */
	ret = usb_set_transceiver(dotg->otg.phy);
	if (ret) {
		dev_err(dotg->otg.phy->dev,
			"%s: failed to set transceiver, already exists\n",
			__func__);
		goto err2;
	}

	dotg->otg.phy->state = OTG_STATE_UNDEFINED;

	init_completion(&dotg->dwc3_xcvr_vbus_init);
	INIT_DELAYED_WORK(&dotg->sm_work, dwc3_otg_sm_work);

	/* The OTG IRQ is shared with other blocks on the same line. */
	ret = request_irq(dotg->irq, dwc3_otg_interrupt, IRQF_SHARED,
				"dwc3_otg", dotg);
	if (ret) {
		dev_err(dotg->otg.phy->dev,
			"failed to request irq #%d --> %d\n",
			dotg->irq, ret);
		goto err3;
	}

	pm_runtime_get(dwc->dev);

	return 0;

	/* Unwind in strict reverse order of the setup above. */
err3:
	cancel_delayed_work_sync(&dotg->sm_work);
	usb_set_transceiver(NULL);
err2:
	kfree(dotg->otg.phy);
err1:
	dwc->dotg = NULL;
	kfree(dotg);
	return ret;
}
/*
 * ab8500_ponkey_probe() - register the AB8500 power-on key as an input
 * device, driven by the ONKEY debounce-falling/rising interrupts.
 */
static int __devinit ab8500_ponkey_probe(struct platform_device *pdev)
{
	struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
	struct ab8500_ponkey *pon;
	struct input_dev *idev;
	int dbf_irq, dbr_irq;
	int ret;

	/* Both debounce IRQs (falling and rising edge) are mandatory. */
	dbf_irq = platform_get_irq_byname(pdev, "ONKEY_DBF");
	if (dbf_irq < 0) {
		dev_err(&pdev->dev, "No IRQ for ONKEY_DBF, error=%d\n", dbf_irq);
		return dbf_irq;
	}

	dbr_irq = platform_get_irq_byname(pdev, "ONKEY_DBR");
	if (dbr_irq < 0) {
		dev_err(&pdev->dev, "No IRQ for ONKEY_DBR, error=%d\n", dbr_irq);
		return dbr_irq;
	}

	/* Allocate both objects up front; the shared exit path copes with
	 * either (or both) being NULL. */
	pon = kzalloc(sizeof(struct ab8500_ponkey), GFP_KERNEL);
	idev = input_allocate_device();
	if (!pon || !idev) {
		ret = -ENOMEM;
		goto out_free;
	}

	pon->idev = idev;
	pon->ab8500 = ab8500;
	pon->irq_dbf = dbf_irq;
	pon->irq_dbr = dbr_irq;

	idev->name = "AB8500 POn(PowerOn) Key";
	idev->dev.parent = &pdev->dev;

	input_set_capability(idev, EV_KEY, KEY_POWER);

	ret = request_any_context_irq(pon->irq_dbf, ab8500_ponkey_handler,
				      0, "ab8500-ponkey-dbf", pon);
	if (ret < 0) {
		dev_err(ab8500->dev, "Failed to request dbf IRQ#%d: %d\n",
			pon->irq_dbf, ret);
		goto out_free;
	}

	ret = request_any_context_irq(pon->irq_dbr, ab8500_ponkey_handler,
				      0, "ab8500-ponkey-dbr", pon);
	if (ret < 0) {
		dev_err(ab8500->dev, "Failed to request dbr IRQ#%d: %d\n",
			pon->irq_dbr, ret);
		goto out_free_dbf;
	}

	ret = input_register_device(pon->idev);
	if (ret) {
		dev_err(ab8500->dev, "Can't register input device: %d\n", ret);
		goto out_free_dbr;
	}

	platform_set_drvdata(pdev, pon);
	return 0;

out_free_dbr:
	free_irq(pon->irq_dbr, pon);
out_free_dbf:
	free_irq(pon->irq_dbf, pon);
out_free:
	input_free_device(idev);
	kfree(pon);
	return ret;
}
/*
 * msm_gpu_init() - one-time setup of a MSM GPU instance.
 * @drm:     owning DRM device
 * @pdev:    platform device providing registers, IRQ, clocks, regulators
 * @gpu:     GPU structure to initialize (embedded in the adreno device)
 * @funcs:   hardware-specific operations vtable
 * @name:    human-readable GPU name (also used as IRQ/ioremap label)
 * @ioname:  name of the MEM resource to map
 * @irqname: name of the IRQ resource to request
 * @ringsz:  ringbuffer size to allocate
 *
 * Acquires, in order: MMIO mapping, IRQ, clocks, regulators, an IOMMU
 * domain, and a ringbuffer mapped into that domain.  Clocks and
 * regulators are optional (lookup failures are squashed to NULL);
 * everything else is fatal.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the fail: path returns without releasing the IOMMU
 * domain or ringbuffer acquired above — presumably the caller tears
 * those down via its own cleanup routine; confirm against callers.
 */
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, const char *ioname, const char *irqname, int ringsz)
{
	int i, ret;

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	/* Retire/recover workers and hangcheck timer must exist before the
	 * IRQ is live, since the interrupt path may schedule them. */
	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	/* clk_names table and grp_clks array are filled in lockstep below. */
	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n",
				gpu->irq, ret);
		goto fail;
	}

	/* Acquire clocks: (all optional — a failed lookup leaves NULL) */
	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
		gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
		if (IS_ERR(gpu->grp_clks[i]))
			gpu->grp_clks[i] = NULL;
	}

	gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: (also optional) */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address
	 * space:
	 */
	gpu->iommu = iommu_domain_alloc(&platform_bus_type);
	if (!gpu->iommu) {
		dev_err(drm->dev, "failed to allocate IOMMU\n");
		ret = -ENOMEM;
		goto fail;
	}
	gpu->id = msm_register_iommu(drm, gpu->iommu);

	/* Create ringbuffer: */
	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
	if (IS_ERR(gpu->rb)) {
		ret = PTR_ERR(gpu->rb);
		/* NULL the pointer so later teardown can't follow ERR_PTR */
		gpu->rb = NULL;
		dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
		goto fail;
	}

	/* Pin the ringbuffer BO into the GPU's address space. */
	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
		goto fail;
	}

	bs_init(gpu, pdev);

	return 0;

fail:
	return ret;
}
static int hi6421_onkey_probe(struct platform_device *pdev) { struct hi6421_onkey_info *info; struct device *dev = &pdev->dev; int ret; info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->idev = input_allocate_device(); if (!info->idev) { dev_err(&pdev->dev, "Failed to allocate input device\n"); return -ENOMEM; } info->idev->name = "hi6421_on"; info->idev->phys = "hi6421_on/input0"; info->idev->dev.parent = &pdev->dev; info->idev->evbit[0] = BIT_MASK(EV_KEY); __set_bit(KEY_POWER, info->idev->keybit); info->irq[0] = platform_get_irq_byname(pdev, "down"); if (info->irq[0] < 0) { ret = -ENOENT; goto err_irq0; } ret = request_irq(info->irq[0], hi6421_onkey_handler, IRQF_DISABLED, "down", info); if (ret < 0) goto err_irq0; info->irq[1] = platform_get_irq_byname(pdev, "up"); if (info->irq[1] < 0) { ret = -ENOENT; goto err_irq1; } ret = request_irq(info->irq[1], hi6421_onkey_handler, IRQF_DISABLED, "up", info); if (ret < 0) goto err_irq1; info->irq[2] = platform_get_irq_byname(pdev, "hold 1s"); if (info->irq[2] < 0) { ret = -ENOENT; goto err_irq2; } ret = request_irq(info->irq[2], hi6421_onkey_handler, IRQF_DISABLED, "hold 1s", info); if (ret < 0) goto err_irq2; info->irq[3] = platform_get_irq_byname(pdev, "hold 10s"); if (info->irq[3] < 0) { ret = -ENOENT; goto err_irq3; } ret = request_irq(info->irq[3], hi6421_onkey_handler, IRQF_DISABLED, "hold 10s", info); if (ret < 0) goto err_irq3; ret = input_register_device(info->idev); if (ret) { dev_err(&pdev->dev, "Can't register input device: %d\n", ret); goto err_reg; } platform_set_drvdata(pdev, info); return ret; err_reg: free_irq(info->irq[3], info); err_irq3: free_irq(info->irq[2], info); err_irq2: free_irq(info->irq[1], info); err_irq1: free_irq(info->irq[0], info); err_irq0: input_free_device(info->idev); return ret; }