static int tve_probe(struct platform_device *pdev) { int ret, i; struct resource *res; struct tve_platform_data *plat_data = pdev->dev.platform_data; u32 conf_reg; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) return -ENOMEM; tve.pdev = pdev; tve.base = ioremap(res->start, res->end - res->start); tve.irq = platform_get_irq(pdev, 0); if (tve.irq < 0) { ret = tve.irq; goto err0; } INIT_DELAYED_WORK(&tve.cd_work, cd_work_func); ret = request_irq(tve.irq, tve_detect_handler, 0, pdev->name, pdev); if (ret < 0) goto err0; ret = device_create_file(&pdev->dev, &dev_attr_headphone); if (ret < 0) goto err1; for (i = 0; i < num_registered_fb; i++) { if (strcmp(registered_fb[i]->fix.id, "DISP3 BG - DI1") == 0) { tve_fbi = registered_fb[i]; break; } } /* adjust video mode for mx37 */ if (cpu_is_mx37()) { video_modes[0].left_margin = 121; video_modes[0].right_margin = 16; video_modes[0].upper_margin = 17; video_modes[0].lower_margin = 5; video_modes[1].left_margin = 131; video_modes[1].right_margin = 12; video_modes[1].upper_margin = 21; video_modes[1].lower_margin = 3; } if (tve_fbi != NULL) { fb_add_videomode(&video_modes[0], &tve_fbi->modelist); fb_add_videomode(&video_modes[1], &tve_fbi->modelist); } tve.dac_reg = regulator_get(&pdev->dev, plat_data->dac_reg); if (!IS_ERR(tve.dac_reg)) { regulator_set_voltage(tve.dac_reg, 2500000); regulator_enable(tve.dac_reg); } tve.dig_reg = regulator_get(&pdev->dev, plat_data->dig_reg); if (!IS_ERR(tve.dig_reg)) { regulator_set_voltage(tve.dig_reg, 1250000); regulator_enable(tve.dig_reg); } tve.clk = clk_get(&pdev->dev, "tve_clk"); clk_set_rate(tve.clk, 216000000); clk_enable(tve.clk); if (_tve_get_revision() == 1) { tve_regs = &tve_regs_v1; tve_reg_fields = &tve_reg_fields_v1; } else { tve_regs = &tve_regs_v2; tve_reg_fields = &tve_reg_fields_v2; } /* Setup cable detect, for YPrPb mode, default use channel#0 for Y */ __raw_writel(0x01067701, tve.base + tve_regs->tve_cd_cont_reg); /* tve_man_detect(); not working 
*/ conf_reg = 0; __raw_writel(conf_reg, tve.base + tve_regs->tve_com_conf_reg); __raw_writel(0x00000000, tve.base + tve_regs->tve_mv_cont_reg - 4 * 5); __raw_writel(0x00000000, tve.base + tve_regs->tve_mv_cont_reg - 4 * 4); __raw_writel(0x00000000, tve.base + tve_regs->tve_mv_cont_reg - 4 * 3); __raw_writel(0x00000000, tve.base + tve_regs->tve_mv_cont_reg - 4 * 2); __raw_writel(0x00000000, tve.base + tve_regs->tve_mv_cont_reg - 4); __raw_writel(0x00000000, tve.base + tve_regs->tve_mv_cont_reg); clk_disable(tve.clk); ret = fb_register_client(&nb); if (ret < 0) goto err2; return 0; err2: device_remove_file(&pdev->dev, &dev_attr_headphone); err1: free_irq(tve.irq, pdev); err0: iounmap(tve.base); return ret; }
static int serial_omap_probe(struct platform_device *pdev) { struct uart_omap_port *up; struct resource *mem, *irq, *dma_tx, *dma_rx; struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data; int ret = -ENOSPC; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "no mem resource?\n"); return -ENODEV; } irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq) { dev_err(&pdev->dev, "no irq resource?\n"); return -ENODEV; } if (!request_mem_region(mem->start, resource_size(mem), pdev->dev.driver->name)) { dev_err(&pdev->dev, "memory region already claimed\n"); return -EBUSY; } dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); if (!dma_rx) { ret = -EINVAL; goto err; } dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); if (!dma_tx) { ret = -EINVAL; goto err; } up = kzalloc(sizeof(*up), GFP_KERNEL); if (up == NULL) { ret = -ENOMEM; goto do_release_region; } sprintf(up->name, "OMAP UART%d", pdev->id); up->pdev = pdev; up->port.dev = &pdev->dev; up->port.type = PORT_OMAP; up->port.iotype = UPIO_MEM; up->port.irq = irq->start; up->port.regshift = 2; up->port.fifosize = 64; up->port.ops = &serial_omap_pops; up->port.line = pdev->id; up->port.membase = omap_up_info->membase; up->port.mapbase = omap_up_info->mapbase; up->port.flags = omap_up_info->flags; up->port.irqflags = omap_up_info->irqflags; up->port.uartclk = omap_up_info->uartclk; up->uart_dma.uart_base = mem->start; if (omap_up_info->dma_enabled) { up->uart_dma.uart_dma_tx = dma_tx->start; up->uart_dma.uart_dma_rx = dma_rx->start; up->use_dma = 1; up->uart_dma.rx_buf_size = 4096; up->uart_dma.rx_timeout = 2; spin_lock_init(&(up->uart_dma.tx_lock)); spin_lock_init(&(up->uart_dma.rx_lock)); up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE; up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE; } ui[pdev->id] = up; serial_omap_add_console_port(up); ret = uart_add_one_port(&serial_omap_reg, &up->port); if (ret != 0) goto do_release_region; 
platform_set_drvdata(pdev, up); return 0; err: dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n", pdev->id, __func__, ret); do_release_region: release_mem_region(mem->start, resource_size(mem)); return ret; }
/*
 * xhci_plat_probe() - probe a platform xHCI controller.
 *
 * Creates the primary (USB 2.0) HCD, claims and maps the register
 * window, registers the primary HCD, then creates and registers the
 * shared (USB 3.0) HCD on the same IRQ.  The error path unwinds in
 * strict reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno.
 */
static int xhci_plat_probe(struct platform_device *pdev)
{
	const struct hc_driver *driver;
	struct xhci_hcd *xhci;
	struct resource *res;
	struct usb_hcd *hcd;
	int ret;
	int irq;

	if (usb_disabled())
		return -ENODEV;

	driver = &xhci_plat_xhci_driver;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Primary HCD: this also hosts the USB 2.0 roothub. */
	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		ret = -EBUSY;
		goto put_hcd;
	}

	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		ret = -EFAULT;
		goto release_mem_region;
	}

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto unmap_registers;

	/* USB 2.0 roothub is stored in the platform_device now. */
	hcd = platform_get_drvdata(pdev);
	xhci = hcd_to_xhci(hcd);
	xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
			dev_name(&pdev->dev), hcd);
	if (!xhci->shared_hcd) {
		ret = -ENOMEM;
		goto dealloc_usb2_hcd;
	}

	/*
	 * Set the xHCI pointer before xhci_plat_setup() (aka hcd_driver.reset)
	 * is called by usb_add_hcd().
	 */
	*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;

	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
	if (ret)
		goto put_usb3_hcd;

	return 0;

put_usb3_hcd:
	usb_put_hcd(xhci->shared_hcd);

dealloc_usb2_hcd:
	usb_remove_hcd(hcd);

unmap_registers:
	iounmap(hcd->regs);

release_mem_region:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);

put_hcd:
	usb_put_hcd(hcd);

	return ret;
}
/*
 * serial_omap_probe() - probe an OMAP UART from DT or legacy platform data.
 *
 * Resolves the UART IRQ (and optional wake IRQ) from the device tree or
 * platform resources, claims and maps the register window with devm
 * helpers, configures the port from platform data, enables runtime PM
 * and registers the port with the serial core.
 *
 * NOTE(review): the DTR gpio acquired via gpio_request() below is never
 * gpio_free()d on any later failure path — looks like a leak; confirm
 * against the remove() path.
 * NOTE(review): err_add_port undoes runtime PM but apparently not the
 * pm_qos request, the workqueue or device_init_wakeup() — verify.
 *
 * Returns 0 on success or a negative errno.
 */
static int serial_omap_probe(struct platform_device *pdev)
{
	struct uart_omap_port *up;
	struct resource *mem, *irq;
	struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
	int ret, uartirq = 0, wakeirq = 0;

	/* The optional wakeirq may be specified in the board dts file */
	if (pdev->dev.of_node) {
		uartirq = irq_of_parse_and_map(pdev->dev.of_node, 0);
		if (!uartirq)
			return -EPROBE_DEFER;
		wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
		omap_up_info = of_get_uart_port_info(&pdev->dev);
		pdev->dev.platform_data = omap_up_info;
	} else {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		if (!irq) {
			dev_err(&pdev->dev, "no irq resource?\n");
			return -ENODEV;
		}
		uartirq = irq->start;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}

	/* devm region: released automatically on probe failure/removal. */
	if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
				pdev->dev.driver->name)) {
		dev_err(&pdev->dev, "memory region already claimed\n");
		return -EBUSY;
	}

	/* Optional DTR line driven manually by the driver. */
	if (gpio_is_valid(omap_up_info->DTR_gpio) &&
	    omap_up_info->DTR_present) {
		ret = gpio_request(omap_up_info->DTR_gpio, "omap-serial");
		if (ret < 0)
			return ret;
		ret = gpio_direction_output(omap_up_info->DTR_gpio,
					    omap_up_info->DTR_inverted);
		if (ret < 0)
			return ret;
	}

	up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
	if (!up)
		return -ENOMEM;

	if (gpio_is_valid(omap_up_info->DTR_gpio) &&
	    omap_up_info->DTR_present) {
		up->DTR_gpio = omap_up_info->DTR_gpio;
		up->DTR_inverted = omap_up_info->DTR_inverted;
	} else
		up->DTR_gpio = -EINVAL;
	up->DTR_active = 0;

	up->dev = &pdev->dev;
	up->port.dev = &pdev->dev;
	up->port.type = PORT_OMAP;
	up->port.iotype = UPIO_MEM;
	up->port.irq = uartirq;
	up->wakeirq = wakeirq;
	/*
	 * NOTE(review): up->port.line is still 0 (kzalloc) here — this
	 * message always says "uart0" regardless of the actual port.
	 */
	if (!up->wakeirq)
		dev_info(up->port.dev, "no wakeirq for uart%d\n",
			 up->port.line);

	up->port.regshift = 2;
	up->port.fifosize = 64;
	up->port.ops = &serial_omap_pops;

	/* Line number: DT "serial" alias, or legacy platform id. */
	if (pdev->dev.of_node)
		up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
	else
		up->port.line = pdev->id;

	if (up->port.line < 0) {
		dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
			up->port.line);
		ret = -ENODEV;
		goto err_port_line;
	}

	ret = serial_omap_probe_rs485(up, pdev->dev.of_node);
	if (ret < 0)
		goto err_rs485;

	sprintf(up->name, "OMAP UART%d", up->port.line);
	up->port.mapbase = mem->start;
	up->port.membase = devm_ioremap(&pdev->dev, mem->start,
						resource_size(mem));
	if (!up->port.membase) {
		dev_err(&pdev->dev, "can't ioremap UART\n");
		ret = -ENOMEM;
		goto err_ioremap;
	}

	up->port.flags = omap_up_info->flags;
	up->port.uartclk = omap_up_info->uartclk;
	if (!up->port.uartclk) {
		up->port.uartclk = DEFAULT_CLK_SPEED;
		dev_warn(&pdev->dev,
			 "No clock speed specified: using default: %d\n",
			 DEFAULT_CLK_SPEED);
	}

	/* CPU-DMA latency QoS: relaxed default, tightened while active. */
	up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	pm_qos_add_request(&up->pm_qos_request,
		PM_QOS_CPU_DMA_LATENCY, up->latency);
	serial_omap_uart_wq = create_singlethread_workqueue(up->name);
	INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);

	platform_set_drvdata(pdev, up);
	/* autosuspend_timeout == 0 means "never autosuspend" (-1). */
	if (omap_up_info->autosuspend_timeout == 0)
		omap_up_info->autosuspend_timeout = -1;
	device_init_wakeup(up->dev, true);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
			omap_up_info->autosuspend_timeout);
	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	omap_serial_fill_features_erratas(up);

	ui[up->port.line] = up;
	serial_omap_add_console_port(up);

	ret = uart_add_one_port(&serial_omap_reg, &up->port);
	if (ret != 0)
		goto err_add_port;

	pm_runtime_mark_last_busy(up->dev);
	pm_runtime_put_autosuspend(up->dev);
	return 0;

err_add_port:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_ioremap:
err_rs485:
err_port_line:
	dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
				pdev->id, __func__, ret);
	return ret;
}
/*
 * em_gio_probe() - probe an Emma Mobile GPIO controller.
 *
 * Maps the two register banks, fills in the gpio_chip and irq_chip
 * callbacks, creates the IRQ domain, requests both bank IRQs and
 * registers the GPIO chip.  Error labels unwind in reverse order.
 *
 * Fix vs. the original: the error path called free_irq(..., pdev)
 * while request_irq() registered dev_id == p, so the IRQs were never
 * actually released.  free_irq() now passes the same dev_id (p).
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit em_gio_probe(struct platform_device *pdev)
{
	struct gpio_em_config *pdata = pdev->dev.platform_data;
	struct em_gio_priv *p;
	struct resource *io[2], *irq[2];
	struct gpio_chip *gpio_chip;
	struct irq_chip *irq_chip;
	const char *name = dev_name(&pdev->dev);
	int ret;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		ret = -ENOMEM;
		goto err0;
	}

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);
	spin_lock_init(&p->sense_lock);

	io[0] = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	io[1] = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	irq[0] = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	irq[1] = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!io[0] || !io[1] || !irq[0] || !irq[1] || !pdata) {
		dev_err(&pdev->dev, "missing IRQ, IOMEM or configuration\n");
		ret = -EINVAL;
		goto err1;
	}

	p->base0 = ioremap_nocache(io[0]->start, resource_size(io[0]));
	if (!p->base0) {
		dev_err(&pdev->dev, "failed to remap low I/O memory\n");
		ret = -ENXIO;
		goto err1;
	}

	p->base1 = ioremap_nocache(io[1]->start, resource_size(io[1]));
	if (!p->base1) {
		dev_err(&pdev->dev, "failed to remap high I/O memory\n");
		ret = -ENXIO;
		goto err2;
	}

	gpio_chip = &p->gpio_chip;
	gpio_chip->direction_input = em_gio_direction_input;
	gpio_chip->get = em_gio_get;
	gpio_chip->direction_output = em_gio_direction_output;
	gpio_chip->set = em_gio_set;
	gpio_chip->to_irq = em_gio_to_irq;
	gpio_chip->label = name;
	gpio_chip->owner = THIS_MODULE;
	gpio_chip->base = pdata->gpio_base;
	gpio_chip->ngpio = pdata->number_of_pins;

	irq_chip = &p->irq_chip;
	irq_chip->name = name;
	irq_chip->irq_mask = em_gio_irq_disable;
	irq_chip->irq_unmask = em_gio_irq_enable;
	irq_chip->irq_enable = em_gio_irq_enable;
	irq_chip->irq_disable = em_gio_irq_disable;
	irq_chip->irq_set_type = em_gio_irq_set_type;
	irq_chip->flags = IRQCHIP_SKIP_SET_WAKE;

	ret = em_gio_irq_domain_init(p);
	if (ret) {
		dev_err(&pdev->dev, "cannot initialize irq domain\n");
		goto err3;
	}

	if (request_irq(irq[0]->start, em_gio_irq_handler, 0, name, p)) {
		dev_err(&pdev->dev, "failed to request low IRQ\n");
		ret = -ENOENT;
		goto err4;
	}

	if (request_irq(irq[1]->start, em_gio_irq_handler, 0, name, p)) {
		dev_err(&pdev->dev, "failed to request high IRQ\n");
		ret = -ENOENT;
		goto err5;
	}

	ret = gpiochip_add(gpio_chip);
	if (ret) {
		dev_err(&pdev->dev, "failed to add GPIO controller\n");
		goto err6;
	}
	return 0;

err6:
	free_irq(irq[1]->start, p);	/* dev_id must match request_irq() */
err5:
	free_irq(irq[0]->start, p);
err4:
	em_gio_irq_domain_cleanup(p);
err3:
	iounmap(p->base1);
err2:
	iounmap(p->base0);
err1:
	kfree(p);
err0:
	return ret;
}
/*
 * octeon_i2c_probe() - probe an OCTEON TWSI (I2C) adapter.
 *
 * Allocates the adapter state, claims and maps the TWSI register
 * window, installs the ISR, initialises the hardware and clock, and
 * registers a numbered i2c adapter.
 *
 * Fixes vs. the original:
 *  - request_mem_region() failure fell through with result == 0, so
 *    the probe reported success while leaking nothing but doing
 *    nothing — now returns -EBUSY;
 *  - ioremap() result was never checked — now fails with -ENOMEM and
 *    releases the region via the new fail_remap label;
 *  - stray ';' after the closing brace removed.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit octeon_i2c_probe(struct platform_device *pdev)
{
	int irq, result = 0;
	struct octeon_i2c *i2c;
	struct octeon_i2c_data *i2c_data;
	struct resource *res_mem;

	/* All adaptors have an irq. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
	if (!i2c) {
		dev_err(&pdev->dev, "kzalloc failed\n");
		result = -ENOMEM;
		goto out;
	}
	i2c->dev = &pdev->dev;
	i2c_data = pdev->dev.platform_data;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (res_mem == NULL) {
		dev_err(i2c->dev, "found no memory resource\n");
		result = -ENXIO;
		goto fail_region;
	}

	if (i2c_data == NULL) {
		dev_err(i2c->dev, "no I2C frequency data\n");
		result = -ENXIO;
		goto fail_region;
	}

	i2c->twsi_phys = res_mem->start;
	i2c->regsize = resource_size(res_mem);
	i2c->twsi_freq = i2c_data->i2c_freq;
	i2c->sys_freq = i2c_data->sys_freq;

	if (!request_mem_region(i2c->twsi_phys, i2c->regsize, res_mem->name)) {
		dev_err(i2c->dev, "request_mem_region failed\n");
		result = -EBUSY;	/* was silently returning 0 */
		goto fail_region;
	}
	i2c->twsi_base = ioremap(i2c->twsi_phys, i2c->regsize);
	if (!i2c->twsi_base) {
		dev_err(i2c->dev, "ioremap failed\n");
		result = -ENOMEM;
		goto fail_remap;
	}

	init_waitqueue_head(&i2c->queue);

	i2c->irq = irq;

	result = request_irq(i2c->irq, octeon_i2c_isr, 0, DRV_NAME, i2c);
	if (result < 0) {
		dev_err(i2c->dev, "failed to attach interrupt\n");
		goto fail_irq;
	}

	result = octeon_i2c_initlowlevel(i2c);
	if (result) {
		dev_err(i2c->dev, "init low level failed\n");
		goto fail_add;
	}

	result = octeon_i2c_setclock(i2c);
	if (result) {
		dev_err(i2c->dev, "clock init failed\n");
		goto fail_add;
	}

	i2c->adap = octeon_i2c_ops;
	i2c->adap.dev.parent = &pdev->dev;
	/* A negative pdev->id means "any"; this driver pins it to bus 0. */
	i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
	i2c_set_adapdata(&i2c->adap, i2c);
	platform_set_drvdata(pdev, i2c);

	result = i2c_add_numbered_adapter(&i2c->adap);
	if (result < 0) {
		dev_err(i2c->dev, "failed to add adapter\n");
		goto fail_add;
	}

	dev_info(i2c->dev, "version %s\n", DRV_VERSION);

	return result;

fail_add:
	platform_set_drvdata(pdev, NULL);
	free_irq(i2c->irq, i2c);
fail_irq:
	iounmap(i2c->twsi_base);
fail_remap:
	release_mem_region(i2c->twsi_phys, i2c->regsize);
fail_region:
	kfree(i2c);
out:
	return result;
}
/*
 * msm_gemini_platform_init() - bring up the Gemini JPEG engine.
 *
 * Claims the MMIO region, maps it, enables the JPEG clocks, runs the
 * hardware init sequence and installs the caller-supplied IRQ handler.
 * On success the claimed region, mapped base and IRQ number are handed
 * back through @mem, @base and @irq; on failure everything acquired so
 * far is released in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
int msm_gemini_platform_init(struct platform_device *pdev,
	struct resource **mem,
	void **base,
	int *irq,
	irqreturn_t (*handler) (int, void *),
	void *context)
{
	int result = -1;
	int irq_num;
	struct resource *mem_res, *io_region, *irq_res;
	void *base_va;

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		GMN_PR_ERR("%s: no mem resource?\n", __func__);
		return -ENODEV;
	}

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res) {
		GMN_PR_ERR("no irq resource?\n");
		return -ENODEV;
	}
	irq_num = irq_res->start;

	io_region = request_mem_region(mem_res->start, resource_size(mem_res),
				       pdev->name);
	if (!io_region) {
		GMN_PR_ERR("%s: region already claimed\n", __func__);
		return -EBUSY;
	}

	base_va = ioremap(mem_res->start, resource_size(mem_res));
	if (!base_va) {
		result = -ENOMEM;
		GMN_PR_ERR("%s: ioremap failed\n", __func__);
		goto err_release;
	}

	result = msm_camio_jpeg_clk_enable();
	if (result) {
		GMN_PR_ERR("%s: clk failed rc = %d\n", __func__, result);
		goto err_unmap;
	}

	msm_gemini_hw_init(base_va, resource_size(mem_res));

	result = request_irq(irq_num, handler, IRQF_TRIGGER_RISING, "gemini",
			     context);
	if (result) {
		GMN_PR_ERR("%s: request_irq failed, %d\n", __func__, irq_num);
		goto err_clk;
	}

	*mem = mem_res;
	*base = base_va;
	*irq = irq_num;
	GMN_DBG("%s:%d] success\n", __func__, __LINE__);
	return result;

err_clk:
	msm_camio_jpeg_clk_disable();
err_unmap:
	iounmap(base_va);
err_release:
	release_mem_region(mem_res->start, resource_size(mem_res));
	GMN_DBG("%s:%d] fail\n", __func__, __LINE__);
	return result;
}
static int rockchip_spi_probe(struct platform_device *pdev) { struct resource *mem_res; struct rockchip_spi_driver_data *sdd; struct rockchip_spi_info *info = pdev->dev.platform_data; struct dw_spi *dws; int ret, irq; char clk_name[16]; if (!info && pdev->dev.of_node) { info = rockchip_spi_parse_dt(&pdev->dev); if (IS_ERR(info)) return PTR_ERR(info); } if (!info) { dev_err(&pdev->dev, "platform_data missing!\n"); return -ENODEV; } sdd = kzalloc(sizeof(struct rockchip_spi_driver_data), GFP_KERNEL); if (!sdd) { ret = -ENOMEM; goto err_kfree; } sdd->pdev = pdev; sdd->info = info; dws = &sdd->dws; atomic_set(&dws->debug_flag, 0);//debug flag /* Get basic io resource and map it */ irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq); return irq; } mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (mem_res == NULL) { dev_err(&pdev->dev, "Unable to get SPI MEM resource\n"); ret = -ENXIO; goto err_unmap; } dws->regs = ioremap(mem_res->start, (mem_res->end - mem_res->start) + 1); if (!dws->regs){ ret = -EBUSY; goto err_unmap; } dws->paddr = mem_res->start; dws->iolen = (mem_res->end - mem_res->start) + 1; printk(KERN_INFO "dws->regs: %p\n", dws->regs); //get bus num if (pdev->dev.of_node) { ret = of_alias_get_id(pdev->dev.of_node, "spi"); if (ret < 0) { dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); goto err_release_mem; } info->bus_num = ret; } else { info->bus_num = pdev->id; } /* Setup clocks */ sdd->clk_spi = devm_clk_get(&pdev->dev, "spi"); if (IS_ERR(sdd->clk_spi)) { dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n"); ret = PTR_ERR(sdd->clk_spi); goto err_clk; } if (clk_prepare_enable(sdd->clk_spi)) { dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n"); ret = -EBUSY; goto err_clk; } sprintf(clk_name, "pclk_spi%d", info->src_clk_nr); sdd->pclk_spi = devm_clk_get(&pdev->dev, clk_name); if (IS_ERR(sdd->pclk_spi)) { dev_err(&pdev->dev, "Unable to acquire clock '%s'\n", clk_name); ret = 
PTR_ERR(sdd->pclk_spi); goto err_pclk; } if (clk_prepare_enable(sdd->pclk_spi)) { dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name); ret = -EBUSY; goto err_pclk; } clk_set_rate(sdd->clk_spi, info->spi_freq); dws->max_freq = clk_get_rate(sdd->clk_spi); dws->parent_dev = &pdev->dev; dws->bus_num = info->bus_num; dws->num_cs = info->num_cs; dws->irq = irq; dws->clk_spi = sdd->clk_spi; dws->pclk_spi = sdd->pclk_spi; /* * handling for rockchip paltforms, like dma setup, * clock rate, FIFO depth. */ #ifdef CONFIG_SPI_ROCKCHIP_DMA ret = dw_spi_dma_init(dws); if (ret) printk("%s:fail to init dma\n",__func__); #endif ret = dw_spi_add_host(dws); if (ret) goto err_release_mem; platform_set_drvdata(pdev, sdd); printk("%s:num_cs=%d,bus_num=%d,irq=%d,freq=%d ok\n",__func__, info->num_cs, info->bus_num, irq, dws->max_freq); return 0; err_release_mem: release_mem_region(mem_res->start, (mem_res->end - mem_res->start) + 1); err_pclk: clk_disable_unprepare(sdd->pclk_spi); err_clk: clk_disable_unprepare(sdd->clk_spi); err_unmap: iounmap(dws->regs); err_kfree: kfree(sdd); return ret; }
/*
 * mpcore_wdt_probe() - probe the ARM MPcore watchdog.
 *
 * Accepts only the singleton device (id == -1), maps the timer
 * registers, registers the watchdog misc device and its IRQ, then
 * parks the watchdog in the stopped state.
 *
 * Fixes vs. the original:
 *  - error messages referenced an undefined identifier `_dev`; they
 *    now use dev_err(wdt->dev, ...);
 *  - platform_set_drvdata() was passed &dev->dev (a struct device *)
 *    instead of the platform device itself.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit mpcore_wdt_probe(struct platform_device *dev)
{
	struct mpcore_wdt *wdt;
	struct resource *res;
	int ret;

	/* We only accept one device, and it must have an id of -1 */
	if (dev->id != -1)
		return -ENODEV;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto err_out;
	}

	wdt = kzalloc(sizeof(struct mpcore_wdt), GFP_KERNEL);
	if (!wdt) {
		ret = -ENOMEM;
		goto err_out;
	}

	wdt->dev = &dev->dev;
	wdt->irq = platform_get_irq(dev, 0);
	if (wdt->irq < 0) {
		ret = -ENXIO;
		goto err_free;
	}
	wdt->base = ioremap(res->start, res->end - res->start + 1);
	if (!wdt->base) {
		ret = -ENOMEM;
		goto err_free;
	}

	mpcore_wdt_miscdev.parent = &dev->dev;
	ret = misc_register(&mpcore_wdt_miscdev);
	if (ret) {
		dev_err(wdt->dev,
			"cannot register miscdev on minor=%d (err=%d)\n",
			WATCHDOG_MINOR, ret);
		goto err_misc;
	}

	ret = request_irq(wdt->irq, mpcore_wdt_fire, IRQF_DISABLED,
			  "mpcore_wdt", wdt);
	if (ret) {
		dev_err(wdt->dev, "cannot register IRQ%d for watchdog\n",
			wdt->irq);
		goto err_irq;
	}

	/* Start from a known-quiet state. */
	mpcore_wdt_stop(wdt);
	platform_set_drvdata(dev, wdt);
	mpcore_wdt_dev = dev;

	return 0;

err_irq:
	misc_deregister(&mpcore_wdt_miscdev);
err_misc:
	iounmap(wdt->base);
err_free:
	kfree(wdt);
err_out:
	return ret;
}
/*
 * ahci_probe() - probe a platform AHCI SATA controller.
 *
 * Runs the optional platform init hook, maps the MMIO window, reads
 * the saved controller configuration, allocates and configures one
 * ata_port per enabled port, resets and initialises the controller,
 * and activates the host.  On any failure the platform exit hook is
 * invoked (devm handles the rest).
 *
 * Returns 0 on success or a negative errno.
 */
static int ahci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_platform_data *pdata = dev->platform_data;
	/* Default port template; platform data may override it below. */
	struct ata_port_info pi = {
		.flags = AHCI_FLAG_COMMON,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_ops,
	};
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int n_ports;
	int i;
	int rc;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	/* Board-specific bring-up (clocks, phy, regulators...). */
	if (pdata && pdata->init) {
		rc = pdata->init(dev);
		if (rc)
			return rc;
	}

	if (pdata && pdata->ata_port_info)
		pi = *pdata->ata_port_info;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err0;
	}

	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		rc = -ENOMEM;
		goto err0;
	}

	ahci_save_initial_config(dev, hpriv,
				 pdata ? pdata->force_port_map : 0,
				 pdata ? pdata->mask_port_map : 0);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto err0;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set initial link pm policy */
		ap->pm_policy = NOT_AVAILABLE;

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto err0;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
			       &ahci_platform_sht);
	if (rc)
		goto err0;

	return 0;
err0:
	/* Give the board code a chance to power things back down. */
	if (pdata && pdata->exit)
		pdata->exit(dev);
	return rc;
}
static int s3c_sdi_probe(struct device *dev) { struct platform_device* pdev = to_platform_device(dev); struct mmc_host *mmc; struct s3c_sdi_host *host; int ret; #ifdef CONFIG_S3C2443_EVT1 /* EXTINT0 S3C2443 EVT1 workaround */ u32 tmp; #endif mmc = mmc_alloc_host(sizeof(struct s3c_sdi_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto probe_out; } host = mmc_priv(mmc); spin_lock_init(&host->complete_lock); host->complete_what = COMPLETION_NONE; host->mmc = mmc; #if CONFIG_MACH_TOMTOMGO host->irq_cd = IO_GetInterruptNumber(CD_SD); mmc->removable = 1; #elif CONFIG_ARCH_S3C2460 host->irq_cd = IRQ_EINT3; #elif defined(CONFIG_MACH_SMDK2443) host->irq_cd = IRQ_EINT1; #elif defined(CONFIG_ARCH_MDIRAC3) host->subchannel =S3C_DMA3_SDMMC; // host->irq_cd = IRQ_EINT7; #elif defined CONFIG_ARCH_S3C2412 host->irq_cd = IRQ_EINT18; #endif host->dma = S3C_SDI_DMA; host->mem = platform_get_resource(pdev, IORESOURCE_MEM ,0); if (!host->mem) { printk("failed to get io memory region resource.\n"); ret = -ENOENT; goto probe_free_host; } host->mem = request_mem_region(host->mem->start, RESSIZE(host->mem), pdev->name); if (!host->mem) { printk("failed to request io memory region.\n"); ret = -ENOENT; goto probe_free_host; } /* if there is an error here, check your SoC dependent code. * You must have iotable that contains SDI in it. * by scsuh. 
*/ host->base = S3C24XX_VA_SDI; host->irq = platform_get_irq(pdev, 0); if (host->irq == 0) { printk("failed to get interrupt resouce.\n"); ret = -EINVAL; goto release_memory; } if (request_irq(host->irq, s3c_sdi_irq, 0, DRIVER_NAME, host)) { printk("failed to request sdi interrupt.\n"); ret = -ENOENT; goto release_memory; } #if defined(CONFIG_MACH_SMDK2443) #ifdef CONFIG_S3C2443_EVT1 /* EXTINT0 S3C2443 EVT1 workaround */ tmp = __raw_readl(S3C_EXTINT0); s3c_swap_4bit(tmp); __raw_writel(tmp | (1<<7), S3C_EXTINT0); #endif s3c_gpio_cfgpin(S3C_GPF1, S3C_GPF1_EINT1); #elif defined(CONFIG_ARCH_S3C2460) s3c_gpio_cfgpin(S3C_GPJ3, S3C_GPJ3_EXT_INT3); #elif defined CONFIG_ARCH_S3C2412 s3c_gpio_cfgpin(S3C_GPG10, S3C_GPG10_EINT18); #elif defined CONFIG_ARCH_MDIRAC3 ; #endif #ifdef CONFIG_ARCH_MDIRAC3 if (s3c_dma_request(host->dma,host->subchannel, &s3c_sdi_dma_client,NULL)) { printk("unable to get DMA channel.\n" ); ret = -EBUSY; goto probe_free_irq_cd; } #else INIT_WORK( &host->irq_cd_wq, s3c24xx_irq_cd_handler, mmc ); set_irq_type(host->irq_cd, IRQT_BOTHEDGE); if (host->irq_cd > 0) { if (request_irq(host->irq_cd, s3c_sdi_irq_cd, SA_INTERRUPT, DRIVER_NAME, host)) { printk("failed to request card detect interrupt.\n" ); ret = -ENOENT; goto probe_free_irq; } } if (s3c_dma_request(S3C_SDI_DMA, &s3c_sdi_dma_client, NULL)) { printk("unable to get DMA channel.\n" ); ret = -EBUSY; goto probe_free_irq_cd; } #endif host->clk = clk_get(&pdev->dev, "sdi"); if (IS_ERR(host->clk)) { printk("failed to find clock source.\n"); ret = PTR_ERR(host->clk); host->clk = NULL; goto probe_free_host; } if ((ret = clk_enable(host->clk))) { printk("failed to enable clock source.\n"); goto clk_free; } mmc->ops = &s3c_sdi_ops; mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; mmc->f_min = clk_get_rate(host->clk) / 512; /* you must make sure that our sdmmc block can support * up to 25MHz. 
by scsuh */ mmc->f_max = 25 * MHZ; mmc->caps = MMC_CAP_4_BIT_DATA; /* * Since we only have a 16-bit data length register, we must * ensure that we don't exceed 2^16-1 bytes in a single request. */ mmc->max_req_size = 65535; /* * Set the maximum segment size. Since we aren't doing DMA * (yet) we are only limited by the data length register. */ mmc->max_seg_size = mmc->max_req_size; /* * Both block size and block count use 12 bit registers. */ mmc->max_blk_size = 4095; mmc->max_blk_count = 4095; printk(KERN_INFO PFX "probe: mapped sdi_base=%p irq=%u irq_cd=%u dma=%u.\n", host->base, host->irq, host->irq_cd, host->dma); platform_set_drvdata(pdev, mmc); init_timer(&host->timer); host->timer.data = (unsigned long)host; host->timer.function = s3c_sdi_check_status; host->timer.expires = jiffies + HZ; host->ena_2410_workaround=(IO_GetCpuType( ) == GOCPU_S3C2410); if ((ret = mmc_add_host(mmc))) { printk(KERN_INFO PFX "failed to add mmc host.\n"); goto free_dmabuf; } /* Do CPUFREQ registration. */ #if defined CONFIG_CPU_FREQ && defined CONFIG_S3C24XX_DFS_CPUFREQ host->freq_transition.notifier_call = s3c24xxsdi_freq_transition; host->freq_transition.priority = CPUFREQ_ORDER_S3C24XX_SDCARD_PRIO; host->freq_policy.notifier_call = s3c24xxsdi_freq_policy; cpufreq_register_notifier(&host->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); cpufreq_register_notifier(&host->freq_policy, CPUFREQ_POLICY_NOTIFIER); #endif printk(KERN_INFO PFX "initialization done.\n"); return 0; free_dmabuf: clk_disable(host->clk); clk_free: clk_put(host->clk); probe_free_irq_cd: #ifndef CONFIG_ARCH_MDIRAC3 free_irq(host->irq_cd, host); #endif probe_free_irq: free_irq(host->irq, host); release_memory: release_mem_region(host->mem->start, RESSIZE(host->mem)); probe_free_host: mmc_free_host(mmc); probe_out: return ret; }
/*
 * coh901327_probe() - probe the ST-Ericsson COH 901 327 watchdog.
 *
 * Claims and maps the register window, turns on the block clock,
 * reports and clears the boot status, disables a running watchdog,
 * hooks the "bark" interrupt and registers the watchdog device.  The
 * clock is left prepared-but-disabled on success (re-enabled on use).
 *
 * Fixes vs. the original:
 *  - release_mem_region() on the failure path used SZ_4K while the
 *    region was requested with physize — now symmetric;
 *  - platform_get_irq() result is now checked before request_irq();
 *  - the watchdog-registration failure path previously fell into
 *    clk_disable_unprepare() although the clock had already been
 *    clk_disable()d, unbalancing the enable count — it now only
 *    unprepares.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init coh901327_probe(struct platform_device *pdev)
{
	int ret;
	u16 val;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOENT;

	parent = &pdev->dev;
	physize = resource_size(res);
	phybase = res->start;

	if (request_mem_region(phybase, physize, DRV_NAME) == NULL) {
		ret = -EBUSY;
		goto out;
	}

	virtbase = ioremap(phybase, physize);
	if (!virtbase) {
		ret = -ENOMEM;
		goto out_no_remap;
	}

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "could not get clock\n");
		goto out_no_clk;
	}
	ret = clk_prepare_enable(clk);
	if (ret) {
		dev_err(&pdev->dev, "could not prepare and enable clock\n");
		goto out_no_clk_enable;
	}

	/* Report why the last reset happened. */
	val = readw(virtbase + U300_WDOG_SR);
	switch (val) {
	case U300_WDOG_SR_STATUS_TIMED_OUT:
		dev_info(&pdev->dev, "watchdog timed out since last chip reset!\n");
		coh901327_wdt.bootstatus |= WDIOF_CARDRESET;
		/* Status will be cleared below */
		break;
	case U300_WDOG_SR_STATUS_NORMAL:
		dev_info(&pdev->dev, "in normal status, no timeouts have occurred.\n");
		break;
	default:
		dev_info(&pdev->dev, "contains an illegal status code (%08x)\n", val);
		break;
	}

	val = readw(virtbase + U300_WDOG_D2R);
	switch (val) {
	case U300_WDOG_D2R_DISABLE_STATUS_DISABLED:
		dev_info(&pdev->dev, "currently disabled.\n");
		break;
	case U300_WDOG_D2R_DISABLE_STATUS_ENABLED:
		dev_info(&pdev->dev, "currently enabled! (disabling it now)\n");
		coh901327_disable();
		break;
	default:
		dev_err(&pdev->dev,
			"contains an illegal enable/disable code (%08x)\n",
			val);
		break;
	}

	/* Reset the watchdog */
	writew(U300_WDOG_SR_RESET_STATUS_RESET, virtbase + U300_WDOG_SR);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_no_irq;
	}
	if (request_irq(irq, coh901327_interrupt, 0, DRV_NAME " Bark", pdev)) {
		ret = -EIO;
		goto out_no_irq;
	}

	/* Keep the clock gated until the watchdog is actually used. */
	clk_disable(clk);

	ret = watchdog_init_timeout(&coh901327_wdt, margin, &pdev->dev);
	if (ret < 0)
		coh901327_wdt.timeout = 60;

	coh901327_wdt.parent = &pdev->dev;
	ret = watchdog_register_device(&coh901327_wdt);
	if (ret)
		goto out_no_wdog;

	dev_info(&pdev->dev, "initialized. timer margin=%d sec\n", margin);
	return 0;

out_no_wdog:
	/* Clock is already disabled here — only unprepare it. */
	free_irq(irq, pdev);
	clk_unprepare(clk);
	clk_put(clk);
	iounmap(virtbase);
	release_mem_region(phybase, physize);
	return ret;

out_no_irq:
	clk_disable_unprepare(clk);
out_no_clk_enable:
	clk_put(clk);
out_no_clk:
	iounmap(virtbase);
out_no_remap:
	release_mem_region(phybase, physize);
out:
	return ret;
}
static int hi6401_irq_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct hi6401_irq *irq = NULL; enum of_gpio_flags flags; unsigned int virq; int ret = 0; int i; irq = devm_kzalloc(dev, sizeof(*irq), GFP_KERNEL); if (!irq) { dev_err(dev, "cannot allocate hi6401_irq device info\n"); return -ENOMEM; } platform_set_drvdata(pdev, irq); /* get resources */ irq->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!irq->res) { dev_err(dev, "platform_get_resource err\n"); goto err_exit; } if (!devm_request_mem_region(dev, irq->res->start, resource_size(irq->res), pdev->name)) { dev_err(dev, "cannot claim register memory\n"); goto err_exit; } irq->reg_base_addr = devm_ioremap(dev, irq->res->start, resource_size(irq->res)); if (!irq->reg_base_addr) { dev_err(dev, "cannot map register memory\n"); goto ioremap_err; } /* get pinctrl */ irq->pctrl = devm_pinctrl_get(dev); if (IS_ERR(irq->pctrl)) { dev_err(dev, "could not get pinctrl\n"); goto codec_ssi_get_err; } ret = codec_ssi_iomux_default(irq->pctrl); if (0 != ret) goto codec_ssi_iomux_err; /* get codec ssi clk */ irq->codec_ssi_clk = devm_clk_get(dev, "clk_codecssi"); if (IS_ERR(irq->codec_ssi_clk)) { pr_err("clk_get: codecssi clk not found!\n"); ret = PTR_ERR(irq->codec_ssi_clk); goto codec_ssi_clk_err; } ret = clk_prepare_enable(irq->codec_ssi_clk); if (0 != ret) { pr_err("codec_ssi_clk :clk prepare enable failed !\n"); goto codec_ssi_clk_enable_err; } /* get pmu audio clk */ irq->pmu_audio_clk = devm_clk_get(dev, "clk_pmuaudioclk"); if (IS_ERR(irq->pmu_audio_clk)) { pr_err("_clk_get: pmu_audio_clk not found!\n"); ret = PTR_ERR(irq->pmu_audio_clk); goto pmu_audio_clk_err; } ret = clk_prepare_enable(irq->pmu_audio_clk); if (0 != ret) { pr_err("pmu_audio_clk :clk prepare enable failed !\n"); goto pmu_audio_clk_enable_err; } spin_lock_init(&irq->lock); spin_lock_init(&irq->rw_lock); mutex_init(&irq->sr_mutex); mutex_init(&irq->pll_mutex); 
wake_lock_init(&irq->wake_lock, WAKE_LOCK_SUSPEND, "hi6401-irq"); irq->dev = dev; /* clear IRQ status */ hi6401_irq_write(irq, HI6401_REG_IRQ_0, 0xFF); hi6401_irq_write(irq, HI6401_REG_IRQ_1, 0xFF); /* mask all irqs */ hi6401_irq_write(irq, HI6401_REG_IRQM_0, 0xFF); hi6401_irq_write(irq, HI6401_REG_IRQM_1, 0xFF); irq->gpio = of_get_gpio_flags(np, 0, &flags); if (0 > irq->gpio) { dev_err(dev, "get gpio flags error\n"); ret = irq->gpio; goto get_gpio_err; } if (!gpio_is_valid(irq->gpio)) { dev_err(dev, "gpio is invalid\n"); ret = -EINVAL; goto get_gpio_err; } ret = gpio_request_one(irq->gpio, GPIOF_IN, "hi6401_irq"); if (0 > ret) { dev_err(dev, "failed to request gpio%d\n", irq->gpio); goto get_gpio_err; } irq->irq = gpio_to_irq(irq->gpio); irq->domain = irq_domain_add_simple(np, HI6401_MAX_IRQS, 0, &hi6401_domain_ops, irq); if (!irq->domain) { dev_err(dev, "irq domain error\n"); ret = -ENODEV; goto gpio_err; } for (i = 0; i < HI6401_MAX_IRQS; i++) { virq = irq_create_mapping(irq->domain, i); if (virq == NO_IRQ) { dev_err(dev, "Failed mapping hwirq\n"); ret = -ENOSPC; goto gpio_err; } irq->irqs[i] = virq; } ret = request_irq(irq->irq, hi6401_irq_handler, IRQF_TRIGGER_LOW | IRQF_NO_SUSPEND, "hi6401_irq", irq); if (0 > ret) { dev_err(dev, "could not claim irq %d\n", ret); ret = -ENODEV; goto gpio_err; } irq->hi6401_irq_delay_wq = create_singlethread_workqueue("hi6401_irq_delay_wq"); if (!(irq->hi6401_irq_delay_wq)) { pr_err("%s(%u) : workqueue create failed", __FUNCTION__,__LINE__); ret = -ENOMEM; goto irq_delay_wq_err; } INIT_DELAYED_WORK(&irq->hi6401_irq_delay_work, hi6401_irq_work_func); irq->pll_delay_wq = create_singlethread_workqueue("pll_delay_wq"); if (!(irq->pll_delay_wq)) { pr_err("%s : pll_delay_wq create failed", __FUNCTION__); ret = -ENOMEM; goto pll_delay_wq_err; } INIT_DELAYED_WORK(&irq->pll_delay_work, hi6401_pll_work_func); g_dump_buf = (char*)kmalloc(sizeof(char)*Hi6401_SIZE_MAX, GFP_KERNEL); if (!g_dump_buf) { pr_err("%s : couldn't malloc 
buffer.\n",__FUNCTION__); ret = -ENOMEM; goto g_dump_buf_kmalloc_err; } memset(g_dump_buf, 0, Hi6401_SIZE_MAX); /* populate sub nodes */ of_platform_populate(np, of_hi6401_irq_child_match_tbl, NULL, dev); if (!hi6401_client) { hi6401_client = dsm_register_client(&dsm_hi6401); } return 0; g_dump_buf_kmalloc_err: if(irq->pll_delay_wq) { cancel_delayed_work(&irq->pll_delay_work); flush_workqueue(irq->pll_delay_wq); destroy_workqueue(irq->pll_delay_wq); } pll_delay_wq_err: if(irq->hi6401_irq_delay_wq) { cancel_delayed_work(&irq->hi6401_irq_delay_work); flush_workqueue(irq->hi6401_irq_delay_wq); destroy_workqueue(irq->hi6401_irq_delay_wq); } irq_delay_wq_err: free_irq(irq->irq, irq); gpio_err: gpio_free(irq->gpio); get_gpio_err: clk_disable_unprepare(irq->pmu_audio_clk); pmu_audio_clk_enable_err: devm_clk_put(dev, irq->pmu_audio_clk); pmu_audio_clk_err: clk_disable_unprepare(irq->codec_ssi_clk); codec_ssi_clk_enable_err: devm_clk_put(dev, irq->codec_ssi_clk); codec_ssi_clk_err: codec_ssi_iomux_idle(irq->pctrl); codec_ssi_iomux_err: pinctrl_put(irq->pctrl); codec_ssi_get_err: devm_iounmap(dev, irq->reg_base_addr); ioremap_err: devm_release_mem_region(dev, irq->res->start, resource_size(irq->res)); err_exit: devm_kfree(dev, irq); return ret; }
/*
 * vf610_adc_probe() - probe the Vybrid/VF610 ADC.
 *
 * Maps the register window, wires the conversion-complete IRQ, enables
 * the "vref" reference regulator and the "adc" clock, reads DT tuning
 * properties, initializes the hardware and registers the IIO device.
 * Returns 0 on success or a negative errno.  Registers and the IRQ are
 * devm-managed; the regulator and clock are unwound explicitly.
 */
static int vf610_adc_probe(struct platform_device *pdev)
{
	struct vf610_adc *info;
	struct iio_dev *indio_dev;
	struct resource *mem;
	int irq;
	int ret;

	/* driver state lives in the iio_priv() area of the IIO device */
	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct vf610_adc));
	if (!indio_dev) {
		dev_err(&pdev->dev, "Failed allocating iio device\n");
		return -ENOMEM;
	}

	info = iio_priv(indio_dev);
	info->dev = &pdev->dev;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	info->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(info->regs))
		return PTR_ERR(info->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return irq;
	}

	ret = devm_request_irq(info->dev, irq,
			       vf610_adc_isr, 0,
			       dev_name(&pdev->dev), info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed requesting irq, irq = %d\n", irq);
		return ret;
	}

	info->clk = devm_clk_get(&pdev->dev, "adc");
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed getting clock, err = %ld\n",
			PTR_ERR(info->clk));
		return PTR_ERR(info->clk);
	}

	info->vref = devm_regulator_get(&pdev->dev, "vref");
	if (IS_ERR(info->vref))
		return PTR_ERR(info->vref);

	ret = regulator_enable(info->vref);
	if (ret)
		return ret;

	/* cached reference voltage feeds raw-to-voltage scaling later */
	info->vref_uv = regulator_get_voltage(info->vref);

	/* optional DT tuning; missing properties keep defaults */
	of_property_read_u32_array(pdev->dev.of_node,
				   "fsl,adck-max-frequency",
				   info->max_adck_rate, 3);

	ret = of_property_read_u32(pdev->dev.of_node, "min-sample-time",
				   &info->adc_feature.default_sample_time);
	if (ret)
		info->adc_feature.default_sample_time = DEFAULT_SAMPLE_TIME;

	platform_set_drvdata(pdev, indio_dev);

	init_completion(&info->completion);

	indio_dev->name = dev_name(&pdev->dev);
	indio_dev->dev.parent = &pdev->dev;
	indio_dev->dev.of_node = pdev->dev.of_node;
	indio_dev->info = &vf610_adc_iio_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = vf610_adc_iio_channels;
	indio_dev->num_channels = ARRAY_SIZE(vf610_adc_iio_channels);

	ret = clk_prepare_enable(info->clk);
	if (ret) {
		dev_err(&pdev->dev,
			"Could not prepare or enable the clock.\n");
		goto error_adc_clk_enable;
	}

	/* hardware must be configured before the IIO core can use it */
	vf610_adc_cfg_init(info);
	vf610_adc_hw_init(info);

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't register the device.\n");
		goto error_iio_device_register;
	}

	return 0;

error_iio_device_register:
	clk_disable_unprepare(info->clk);
error_adc_clk_enable:
	regulator_disable(info->vref);
	return ret;
}
/*
 * ftr_probe() - probe one FTR/MTR/Gluon RFIC transceiver instance.
 *
 * The first probed instance (n_dev == 0) performs the one-time board
 * bring-up under 'rficlock': reads the RF board id, turns on the
 * matching regulators, maps the GRFC/WF/PDM register windows, exposes
 * the board-id sysfs attribute, enables the AHB and PDM2 clocks and
 * configures the PDMs for the detected board type.  Every instance
 * then sets up its SSBI arbiter mode, FTR1/FTR2 GRFC bus-select tables
 * where applicable, and registers its misc device.
 *
 * Returns 0 (via of_platform_populate) on success or a negative errno.
 */
static int ftr_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ftr_dev_node_info *ptr;
	struct resource *mem_res;
	struct clk *pdm_clk;
	int ret;
	u8 version = 0;

	pr_debug("%s: me = %p, parent = %p\n",
		 __func__, pdev, pdev->dev.parent);

	/* serializes n_dev accounting and one-time board init */
	mutex_lock(&rficlock);

	if (n_dev >= RFIC_DEVICE_NUM) {
		pr_warn("%s: Invalid devices %d\n", __func__, n_dev);
		mutex_unlock(&rficlock);
		return -EINVAL;
	}

	if (!n_dev) {
		/*
		 * First pass: mask the board id down to its type bits to
		 * pick the regulator set, then re-read the full id.
		 */
		rfbid = rf_interface_id();
		if ((rfbid != 0xff) && (rfbid != 0))
			rfbid = rfbid & RF_TYPE_48;
		switch (rfbid) {
		case RF_TYPE_16:
			ftr_regulator_init(pdev);
			break;
		case RF_TYPE_32:
			glu_regulator_init(pdev);
			break;
		case RF_TYPE_48:
			mtr_regulator_init(pdev);
			break;
		default:
			pr_warn("%s:Regulators not turned ON %d\n",
				__func__, rfbid);
		}

		rfbid = rf_interface_id();
		pr_info("%s: RF Board Id 0x%x\n", __func__, rfbid);

		/* MEM 0/1/2: GRFC, WF and PDM register windows */
		mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		grfc_base = devm_ioremap_resource(&pdev->dev, mem_res);
		if (IS_ERR(grfc_base)) {
			mutex_unlock(&rficlock);
			return PTR_ERR(grfc_base);
		}

		mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		wf_base = devm_ioremap_resource(&pdev->dev, mem_res);
		if (IS_ERR(wf_base)) {
			mutex_unlock(&rficlock);
			return PTR_ERR(wf_base);
		}

		mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
		pdm_base = devm_ioremap_resource(&pdev->dev, mem_res);
		if (IS_ERR(pdm_base)) {
			mutex_unlock(&rficlock);
			return PTR_ERR(pdm_base);
		}

		ret = device_create_file(&pdev->dev, &dev_attr_rfboard_id);
		WARN_ON(ret);

		/* clocks are best-effort: a missing clock only logs */
		pdm_clk = clk_get(&pdev->dev, "ahb_clk");
		if (IS_ERR(pdm_clk)) {
			pdm_clk = NULL;
			pr_err("%s: AHB CLK is NULL\n", __func__);
		} else {
			pr_debug("%s: AHB CLK is 0x%x\n", __func__,
				 (unsigned int)pdm_clk);
			clk_prepare(pdm_clk);
			clk_enable(pdm_clk);
		}

		pdm_clk = clk_get(&pdev->dev, "pdm2_clk");
		if (IS_ERR(pdm_clk)) {
			pdm_clk = NULL;
			pr_err("%s: PDM2 CLK is NULL\n", __func__);
		} else {
			pr_debug("%s: PDM2 CLK is 0x%x\n", __func__,
				 (unsigned int)pdm_clk);
			clk_prepare(pdm_clk);
			clk_enable(pdm_clk);
		}

		/* board-type specific PDM bring-up */
		if ((rfbid > RF_TYPE_48) && (rfbid != 0xff)) {
			fsm9900_mtr_init();
			pdm_mtr_enable();
			pr_info("%s: MTR PDM Enabled\n", __func__);
		} else if ((rfbid > RF_TYPE_16) && (rfbid < RF_TYPE_32)) {
			fsm9900_rfic_init();
			pdm_enable();
			pr_info("%s: PDM Enabled\n", __func__);
		} else if ((rfbid > RF_TYPE_32) && (rfbid < RF_TYPE_48)) {
			fsm9900_gluon_init();
			pr_info("%s: Gluon Enabled\n", __func__);
		} else {
			pr_warn("%s:PDMs not configured %d\n",
				__func__, rfbid);
		}
	}

	ptr = ftr_dev_info + n_dev;
	ptr->dev = &pdev->dev;

	/* devices 1..7 sit behind the parent's SSBI arbiter */
	if ((n_dev >= 1) && (n_dev <= 7)) {
		struct ssbi *ssbi =
			platform_get_drvdata(to_platform_device(pdev->dev.parent));

		ptr->pvcaddr = ssbi->base;
		if ((rfbid > RF_TYPE_48) && (n_dev <= 4)) {
			ssbi->controller_type =
				FSM_SBI_CTRL_GENI_SSBI2_ARBITER;
			set_ssbi_mode_2(ssbi->base);
			pr_debug("%s: SSBI2 = 0x%x\n", __func__,
				 ssbi->controller_type);
			rfic_pvc_enable(ssbi->base, 3);
		} else {
			ssbi->controller_type =
				FSM_SBI_CTRL_GENI_SSBI_ARBITER;
			set_ssbi_mode_1(ssbi->base);
			pr_debug("%s: SSBI1 = 0x%x\n", __func__,
				 ssbi->controller_type);
			if ((n_dev == 1) || (n_dev == 2))
				rfic_pvc_enable(ssbi->base, 1);
			if ((n_dev == 3) && (rfbid > RF_TYPE_16) &&
			    (rfbid < RF_TYPE_32))
				rfic_pvc_enable(ssbi->base, 2);
		}
		platform_set_drvdata(to_platform_device(pdev->dev.parent),
				     ssbi);
	}

	/* FTR1/FTR2: program GRFC mask and per-bus select values */
	if ((rfbid > RF_TYPE_16) && (rfbid < RF_TYPE_48) && (n_dev == 1)) {
		ssbi_write(pdev->dev.parent, 0xff, &version, 1);
		ssbi_read(pdev->dev.parent, 0x2, &version, 1);
		pr_info("%s: FTR1 Version = %02x\n", __func__, version);

		ptr->grfcctrladdr = grfc_base + 0x10; /* grp 4 */
		ptr->grfcmaskaddr = grfc_base + 0x30;
		__raw_writel(0x00001800, ptr->grfcmaskaddr);
		ptr->maskvalue = 0x00001800;
		ptr->busselect[TX1_BUS] = 0x00000000;
		ptr->busselect[TX2_BUS] = 0x00001000;
		ptr->busselect[MISC_BUS] = 0x00000800;
		ptr->busselect[RX_BUS] = 0x00001800;
	} else if ((rfbid > RF_TYPE_16) && (rfbid < RF_TYPE_48) &&
		   (n_dev == 2)) {
		ssbi_write(pdev->dev.parent, 0xff, &version, 1);
		ssbi_read(pdev->dev.parent, 0x2, &version, 1);
		pr_info("%s: FTR2 Version = %02x\n", __func__, version);

		ptr->grfcctrladdr = grfc_base + 0x14; /* grp 5*/
		ptr->grfcmaskaddr = grfc_base + 0x34;
		__raw_writel(0x00000600, ptr->grfcmaskaddr);
		ptr->maskvalue = 0x00000600;
		ptr->busselect[TX1_BUS] = 0x000000;
		ptr->busselect[TX2_BUS] = 0x00000400;
		ptr->busselect[MISC_BUS] = 0x00000200;
		ptr->busselect[RX_BUS] = 0x00000600;
	}

	mutex_init(&ptr->lock);

	/*
	 * NOTE(review): on misc_register() failure the code calls
	 * misc_deregister() on the device that just failed to register;
	 * that looks wrong (nothing to deregister) — confirm intent.
	 */
	if (rfbid < RF_TYPE_48) {
		ret = misc_register(ftr_misc_dev + n_dev);
		if (ret < 0) {
			misc_deregister(ftr_misc_dev + n_dev);
			mutex_unlock(&rficlock);
			return ret;
		}
	} else {
		ret = misc_register(mtr_misc_dev + n_dev);
		if (ret < 0) {
			misc_deregister(mtr_misc_dev + n_dev);
			mutex_unlock(&rficlock);
			return ret;
		}
	}

	n_dev++;

	pr_debug("%s: num_of_ssbi_devices = %d\n", __func__, n_dev);
	mutex_unlock(&rficlock);

	return of_platform_populate(np, NULL, NULL, &pdev->dev);
}
static int fimg2d_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; struct fimg2d_platdata *pdata; #ifdef CONFIG_OF struct device *dev = &pdev->dev; int id = 0; #else pdata = to_fimg2d_plat(&pdev->dev); #endif dev_info(&pdev->dev, "++%s\n", __func__); #ifdef CONFIG_OF if (dev->of_node) { id = of_alias_get_id(pdev->dev.of_node, "fimg2d"); } else { id = pdev->id; pdata = dev->platform_data; if (!pdata) { dev_err(&pdev->dev, "no platform data\n"); return -EINVAL; } } #else if (!to_fimg2d_plat(&pdev->dev)) { fimg2d_err("failed to get platform data\n"); return -ENOMEM; } #endif /* global structure */ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) { fimg2d_err("failed to allocate memory for controller\n"); return -ENOMEM; } #ifdef CONFIG_OF pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) { fimg2d_err("failed to allocate memory for controller\n"); kfree(ctrl); return -ENOMEM; } ctrl->pdata = pdata; g2d_parse_dt(dev->of_node, ctrl->pdata); #endif /* setup global ctrl */ ret = fimg2d_setup_controller(ctrl); if (ret) { fimg2d_err("failed to setup controller\n"); goto drv_free; } ctrl->dev = &pdev->dev; /* memory region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { fimg2d_err("failed to get resource\n"); ret = -ENOENT; goto drv_free; } ctrl->mem = request_mem_region(res->start, resource_size(res), pdev->name); if (!ctrl->mem) { fimg2d_err("failed to request memory region\n"); ret = -ENOMEM; goto drv_free; } /* ioremap */ ctrl->regs = ioremap(res->start, resource_size(res)); if (!ctrl->regs) { fimg2d_err("failed to ioremap for SFR\n"); ret = -ENOENT; goto mem_free; } fimg2d_debug("base address: 0x%lx\n", (unsigned long)res->start); /* irq */ ctrl->irq = platform_get_irq(pdev, 0); if (!ctrl->irq) { fimg2d_err("failed to get irq resource\n"); ret = -ENOENT; goto reg_unmap; } fimg2d_debug("irq: %d\n", ctrl->irq); ret = request_irq(ctrl->irq, fimg2d_irq, IRQF_DISABLED, pdev->name, ctrl); if (ret) { fimg2d_err("failed to 
request irq\n"); ret = -ENOENT; goto reg_unmap; } ret = fimg2d_clk_setup(ctrl); if (ret) { fimg2d_err("failed to setup clk\n"); ret = -ENOENT; goto irq_free; } spin_lock_init(&ctrl->qoslock); #ifdef CONFIG_PM_RUNTIME pm_runtime_enable(ctrl->dev); fimg2d_info("enable runtime pm\n"); #else fimg2d_clk_on(ctrl); #endif #ifdef FIMG2D_IOVMM_PAGETABLE exynos_create_iovmm(dev, 3, 3); #endif iovmm_set_fault_handler(dev, fimg2d_sysmmu_fault_handler, ctrl); fimg2d_debug("register sysmmu page fault handler\n"); /* misc register */ ret = misc_register(&fimg2d_dev); if (ret) { fimg2d_err("failed to register misc driver\n"); goto clk_release; } fimg2d_pm_qos_add(ctrl); dev_info(&pdev->dev, "fimg2d registered successfully\n"); return 0; clk_release: #ifdef CONFIG_PM_RUNTIME pm_runtime_disable(ctrl->dev); #else fimg2d_clk_off(ctrl); #endif fimg2d_clk_release(ctrl); irq_free: free_irq(ctrl->irq, NULL); reg_unmap: iounmap(ctrl->regs); mem_free: release_mem_region(res->start, resource_size(res)); drv_free: #ifdef BLIT_WORKQUE if (ctrl->work_q) destroy_workqueue(ctrl->work_q); #endif mutex_destroy(&ctrl->drvlock); #ifdef CONFIG_OF kfree(pdata); #endif kfree(ctrl); return ret; }
/*
 * s5p_ehci_probe() - probe the Samsung S5P/Exynos EHCI host controller.
 *
 * Allocates driver state and the USB HCD, enables the "usbhost" clock,
 * maps the controller registers, initializes the PHY via platform data
 * and adds the HCD.  Returns 0 on success or a negative errno,
 * releasing resources in reverse order on failure.
 *
 * Fix over the previous version: platform_get_irq() returns a negative
 * errno on failure, but the code tested "if (!irq)", so a failure was
 * missed and a negative number was handed to usb_add_hcd().  The check
 * is now "irq <= 0" (0 is also not a valid IRQ here) and the errno is
 * propagated.
 */
static int __devinit s5p_ehci_probe(struct platform_device *pdev)
{
	struct s5p_ehci_platdata *pdata;
	struct s5p_ehci_hcd *s5p_ehci;
	struct usb_hcd *hcd;
	struct ehci_hcd *ehci;
	struct resource *res;
	int irq;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "No platform data defined\n");
		return -EINVAL;
	}

	s5p_ehci = kzalloc(sizeof(struct s5p_ehci_hcd), GFP_KERNEL);
	if (!s5p_ehci)
		return -ENOMEM;

	s5p_ehci->dev = &pdev->dev;

	hcd = usb_create_hcd(&s5p_ehci_hc_driver, &pdev->dev,
			     dev_name(&pdev->dev));
	if (!hcd) {
		dev_err(&pdev->dev, "Unable to create HCD\n");
		err = -ENOMEM;
		goto fail_hcd;
	}

	s5p_ehci->hcd = hcd;
	s5p_ehci->clk = clk_get(&pdev->dev, "usbhost");
	if (IS_ERR(s5p_ehci->clk)) {
		dev_err(&pdev->dev, "Failed to get usbhost clock\n");
		err = PTR_ERR(s5p_ehci->clk);
		goto fail_clk;
	}

	err = clk_enable(s5p_ehci->clk);
	if (err)
		goto fail_clken;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get I/O memory\n");
		err = -ENXIO;
		goto fail_io;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = ioremap(res->start, resource_size(res));
	if (!hcd->regs) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto fail_io;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		/* negative errno, or 0 which is not a valid IRQ here */
		dev_err(&pdev->dev, "Failed to get IRQ\n");
		err = irq ? irq : -ENODEV;
		goto fail;
	}

	if (pdata->phy_init)
		pdata->phy_init(pdev, S5P_USB_PHY_HOST);

	ehci = hcd_to_ehci(hcd);
	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase));

	s5p_ehci_configurate(hcd);

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = readl(&ehci->caps->hcs_params);

	err = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
	if (err) {
		dev_err(&pdev->dev, "Failed to add USB HCD\n");
		goto fail;
	}

	platform_set_drvdata(pdev, s5p_ehci);

	create_ehci_sys_file(ehci);
	s5p_ehci->power_on = 1;

#ifdef CONFIG_USB_EXYNOS_SWITCH
	if (samsung_board_rev_is_0_0())
		ehci_hub_control(ehci_to_hcd(ehci), ClearPortFeature,
				 USB_PORT_FEAT_POWER, 1, NULL, 0);
#endif
#ifdef CONFIG_USB_SUSPEND
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
#endif

	return 0;

fail:
	iounmap(hcd->regs);
fail_io:
	clk_disable(s5p_ehci->clk);
fail_clken:
	clk_put(s5p_ehci->clk);
fail_clk:
	usb_put_hcd(hcd);
fail_hcd:
	kfree(s5p_ehci);
	return err;
}
static int __devinit c67x00_drv_probe(struct platform_device *pdev) { struct c67x00_device *c67x00; struct c67x00_platform_data *pdata; struct resource *res, *res2; int ret, i; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res2) return -ENODEV; pdata = pdev->dev.platform_data; if (!pdata) return -ENODEV; c67x00 = kzalloc(sizeof(*c67x00), GFP_KERNEL); if (!c67x00) return -ENOMEM; if (!request_mem_region(res->start, res->end - res->start + 1, pdev->name)) { dev_err(&pdev->dev, "Memory region busy\n"); ret = -EBUSY; goto request_mem_failed; } c67x00->hpi.base = ioremap(res->start, res->end - res->start + 1); if (!c67x00->hpi.base) { dev_err(&pdev->dev, "Unable to map HPI registers\n"); ret = -EIO; goto map_failed; } spin_lock_init(&c67x00->hpi.lock); c67x00->hpi.regstep = pdata->hpi_regstep; c67x00->pdata = pdev->dev.platform_data; c67x00->pdev = pdev; c67x00_ll_init(c67x00); c67x00_ll_hpi_reg_init(c67x00); ret = request_irq(res2->start, c67x00_irq, 0, pdev->name, c67x00); if (ret) { dev_err(&pdev->dev, "Cannot claim IRQ\n"); goto request_irq_failed; } ret = c67x00_ll_reset(c67x00); if (ret) { dev_err(&pdev->dev, "Device reset failed\n"); goto reset_failed; } for (i = 0; i < C67X00_SIES; i++) c67x00_probe_sie(&c67x00->sie[i], c67x00, i); platform_set_drvdata(pdev, c67x00); return 0; reset_failed: free_irq(res2->start, c67x00); request_irq_failed: iounmap(c67x00->hpi.base); map_failed: release_mem_region(res->start, res->end - res->start + 1); request_mem_failed: kfree(c67x00); return ret; }
static int hisi_thermal_probe(struct platform_device *pdev) { struct hisi_thermal_data *data; struct resource *res; int i; int ret; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; mutex_init(&data->thermal_lock); data->pdev = pdev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); data->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(data->regs)) { dev_err(&pdev->dev, "failed to get io address\n"); return PTR_ERR(data->regs); } data->irq = platform_get_irq(pdev, 0); if (data->irq < 0) return data->irq; ret = devm_request_threaded_irq(&pdev->dev, data->irq, hisi_thermal_alarm_irq, hisi_thermal_alarm_irq_thread, 0, "hisi_thermal", data); if (ret < 0) { dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret); return ret; } platform_set_drvdata(pdev, data); data->clk = devm_clk_get(&pdev->dev, "thermal_clk"); if (IS_ERR(data->clk)) { ret = PTR_ERR(data->clk); if (ret != -EPROBE_DEFER) dev_err(&pdev->dev, "failed to get thermal clk: %d\n", ret); return ret; } /* enable clock for thermal */ ret = clk_prepare_enable(data->clk); if (ret) { dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret); return ret; } for (i = 0; i < HISI_MAX_SENSORS; ++i) { ret = hisi_thermal_register_sensor(pdev, data, &data->sensors[i], i); if (ret) { dev_err(&pdev->dev, "failed to register thermal sensor: %d\n", ret); goto err_get_sensor_data; } } hisi_thermal_enable_bind_irq_sensor(data); data->irq_enabled = true; for (i = 0; i < HISI_MAX_SENSORS; i++) hisi_thermal_toggle_sensor(&data->sensors[i], true); return 0; err_get_sensor_data: clk_disable_unprepare(data->clk); return ret; }
/*
 * mxc_wdt_ioctl() - watchdog character-device ioctl handler.
 *
 * Implements the standard WDIOC_* commands against the MXC watchdog
 * registers at wdt_base_reg.
 *
 * NOTE(review): WDIOC_GETSUPPORT returns copy_to_user()'s result
 * directly; copy_to_user returns the number of bytes NOT copied, so a
 * partial failure yields a positive value instead of -EFAULT — confirm
 * whether callers rely on this.
 */
static int mxc_wdt_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	int new_margin;
	int bootr;
	static struct watchdog_info ident = {
		.identity = "MXC Watchdog",
		.options = WDIOF_SETTIMEOUT,
		.firmware_version = 0,
	};

	switch (cmd) {
	default:
		return -ENOIOCTLCMD;
	case WDIOC_GETSUPPORT:
		return copy_to_user((struct watchdog_info __user *)arg,
				    &ident, sizeof(ident));
	case WDIOC_GETSTATUS:
		return put_user(0, (int __user *)arg);
	case WDIOC_GETBOOTSTATUS:
		bootr = mxc_wdt_get_bootreason(wdt_base_reg);
		return put_user(bootr, (int __user *)arg);
	case WDIOC_KEEPALIVE:
		mxc_wdt_ping(wdt_base_reg);
		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_margin, (int __user *)arg))
			return -EFAULT;
		/* clamp/adjust, then restart the timer with the new margin */
		mxc_wdt_adjust_timeout(new_margin);
		mxc_wdt_disable(wdt_base_reg);
		mxc_wdt_set_timeout(wdt_base_reg);
		mxc_wdt_enable(wdt_base_reg);
		mxc_wdt_ping(wdt_base_reg);
		return 0;
	case WDIOC_GETTIMEOUT:
		mxc_wdt_ping(wdt_base_reg);
		new_margin = mxc_wdt_get_timeout(wdt_base_reg);
		return put_user(new_margin, (int __user *)arg);
	}
}

/* character-device operations for /dev/watchdog */
static struct file_operations mxc_wdt_fops = {
	.owner = THIS_MODULE,
	.write = mxc_wdt_write,
	.ioctl = mxc_wdt_ioctl,
	.open = mxc_wdt_open,
	.release = mxc_wdt_release,
};

static struct miscdevice mxc_wdt_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &mxc_wdt_fops
};

/*
 * mxc_wdt_probe() - claim the register region, disable the watchdog
 * and register the misc device.  The region resource is stashed in
 * drvdata for shutdown/remove/PM.
 */
static int __init mxc_wdt_probe(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res, *mem;
	int ret;

	/* reserve static register mappings */
	res = platform_get_resource(pdev, IORESOURCE_MEM, dev_num);
	if (!res)
		return -ENOENT;

	mem = request_mem_region(res->start, res->end - res->start + 1,
				 pdev->name);
	if (mem == NULL)
		return -EBUSY;

	dev_set_drvdata(dev, mem);
	/* static (non-ioremap) mapping of the register block */
	wdt_base_reg = IO_ADDRESS(res->start);
	mxc_wdt_disable(wdt_base_reg);
	mxc_wdt_adjust_timeout(timer_margin);

	mxc_wdt_users = 0;

	mxc_wdt_miscdev.dev = dev;
	ret = misc_register(&mxc_wdt_miscdev);
	if (ret)
		goto fail;

	pr_info("MXC Watchdog # %d Timer: initial timeout %d sec\n",
		dev_num, timer_margin);

	return 0;

fail:
	release_resource(mem);
	pr_info("MXC Watchdog Probe failed\n");
	return ret;
}

/*
 * NOTE(review): shutdown/suspend/resume pass res->start (the physical
 * address of the drvdata resource) to the mxc_wdt_* helpers, while
 * probe used IO_ADDRESS(res->start) — verify these helpers expect a
 * physical base here.
 */
static void mxc_wdt_shutdown(struct device *dev)
{
	struct resource *res = dev_get_drvdata(dev);
	mxc_wdt_disable(res->start);
	pr_info("MXC Watchdog # %d shutdown\n", dev_num);
}

static int __exit mxc_wdt_remove(struct device *dev)
{
	struct resource *mem = dev_get_drvdata(dev);
	misc_deregister(&mxc_wdt_miscdev);
	release_resource(mem);
	pr_info("MXC Watchdog # %d removed\n", dev_num);
	return 0;
}

#ifdef CONFIG_PM

/* REVISIT ... not clear this is the best way to handle system suspend; and
 * it's very inappropriate for selective device suspend (e.g. suspending this
 * through sysfs rather than by stopping the watchdog daemon). Also, this
 * may not play well enough with NOWAYOUT...
 */

static int mxc_wdt_suspend(struct device *dev, u32 state, u32 level)
{
	struct resource *res = dev_get_drvdata(dev);
	/* stop the dog so it cannot fire while suspended */
	if (level == SUSPEND_POWER_DOWN && mxc_wdt_users)
		mxc_wdt_disable(res->start);
	return 0;
}

static int mxc_wdt_resume(struct device *dev, u32 level)
{
	struct resource *res = dev_get_drvdata(dev);
	if (level == RESUME_POWER_ON && mxc_wdt_users) {
		mxc_wdt_enable(res->start);
		mxc_wdt_ping(res->start);
	}
	return 0;
}

#else
#define mxc_wdt_suspend NULL
#define mxc_wdt_resume  NULL
#endif

/* legacy (pre-platform_driver) device_driver registration */
static struct device_driver mxc_wdt_driver = {
	.name = "mxc_wdt",
	.bus = &platform_bus_type,
	.probe = mxc_wdt_probe,
	.shutdown = mxc_wdt_shutdown,
	.remove = __exit_p(mxc_wdt_remove),
	.suspend = mxc_wdt_suspend,
	.resume = mxc_wdt_resume,
};

static int __init mxc_wdt_init(void)
{
	pr_info("MXC WatchDog Driver %s\n", DVR_VER);

	/* validate the module parameter before registering */
	if ((timer_margin < TIMER_MARGIN_MIN) ||
	    (timer_margin > TIMER_MARGIN_MAX)) {
		pr_info("MXC watchdog error. wrong timer_margin %d\n",
			timer_margin);
		pr_info(" Range: %d to %d seconds\n", TIMER_MARGIN_MIN,
			TIMER_MARGIN_MAX);
		return -EINVAL;
	}

	return driver_register(&mxc_wdt_driver);
}

static void __exit mxc_wdt_exit(void)
{
	driver_unregister(&mxc_wdt_driver);
	pr_info("MXC WatchDog Driver removed\n");
}

module_init(mxc_wdt_init);
module_exit(mxc_wdt_exit);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
static int s3c_pp_probe(struct platform_device *pdev) { struct resource *res; int ret; int tmp; int i; // Use DOUTmpll source clock as a scaler clock tmp = __raw_readl(S3C_CLK_SRC); tmp &=~(0x3<<28); tmp |= (0x1<<28); __raw_writel(tmp, S3C_CLK_SRC); /* find the IRQs */ s3c_pp_irq = platform_get_irq(pdev, 0); if(s3c_pp_irq <= 0) { printk(KERN_ERR PFX "failed to get irq resouce\n"); return -ENOENT; } /* get the memory region for the post processor driver */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if(res == NULL) { printk(KERN_ERR PFX "failed to get memory region resouce\n"); return -ENOENT; } s3c_pp_mem = request_mem_region(res->start, res->end-res->start+1, pdev->name); if(s3c_pp_mem == NULL) { printk(KERN_ERR PFX "failed to reserve memory region\n"); return -ENOENT; } s3c_pp_base = ioremap(s3c_pp_mem->start, s3c_pp_mem->end - res->start + 1); if(s3c_pp_base == NULL) { printk(KERN_ERR PFX "failed ioremap\n"); return -ENOENT; } pp_clock = clk_get(&pdev->dev, "post"); if(pp_clock == NULL) { printk(KERN_ERR PFX "failed to find post clock source\n"); return -ENOENT; } clk_enable(pp_clock); h_clk = clk_get(&pdev->dev, "hclk"); if(h_clk == NULL) { printk(KERN_ERR PFX "failed to find h_clk clock source\n"); return -ENOENT; } init_waitqueue_head(&waitq); ret = misc_register(&s3c_pp_dev); if (ret) { printk (KERN_ERR "cannot register miscdev on minor=%d (%d)\n", PP_MINOR, ret); return ret; } ret = request_irq(s3c_pp_irq, (irq_handler_t) s3c_pp_isr,IRQF_DISABLED, pdev->name, NULL); if (ret) { printk(KERN_ERR "request_irq(PP) failed.\n"); return ret; } h_mutex = (struct mutex *) kmalloc(sizeof(struct mutex), GFP_DMA|GFP_ATOMIC ); if (h_mutex == NULL) return -1; mutex_init(h_mutex); mem_alloc_mutex = (struct mutex *) kmalloc(sizeof(struct mutex), GFP_DMA|GFP_ATOMIC ); if (mem_alloc_mutex == NULL) return -1; mutex_init(mem_alloc_mutex); // initialzie instance infomation s3c_pp_instance_info.running_instance_no = -1; s3c_pp_instance_info.last_running_instance_no = 
-1; s3c_pp_instance_info.in_use_instance_count = 0; s3c_pp_instance_info.dma_mode_instance_count = 0; s3c_pp_instance_info.fifo_mode_instance_no = -1; for ( i=0; i < PP_MAX_NO_OF_INSTANCES; i++ ) s3c_pp_instance_info.instance_state[i] = PP_INSTANCE_FREE; /* check to see if everything is setup correctly */ return 0; }
/*
 * host1x_probe() - probe the NVIDIA Tegra host1x controller.
 *
 * Matches the SoC variant, maps registers, obtains the clock and reset
 * control, optionally attaches an IOMMU domain with an IOVA allocator,
 * then brings up channels, clock, reset, syncpoints, interrupts and
 * finally registers with the host1x bus.  Returns 0 on success or a
 * negative errno, unwinding in reverse order on failure.
 */
static int host1x_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	struct host1x *host;
	struct resource *regs;
	int syncpt_irq;
	int err;

	id = of_match_device(host1x_of_match, &pdev->dev);
	if (!id)
		return -EINVAL;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "failed to get registers\n");
		return -ENXIO;
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENXIO;
	}

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;
	host->info = id->data;	/* per-SoC variant data */

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);

	/* variant-specific early init hook, if any */
	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		err = PTR_ERR(host->clk);
		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}

	/*
	 * With an IOMMU present, carve an IOVA allocator out of the
	 * domain's aperture, using the domain's smallest page size as
	 * the allocation granule.
	 */
	if (iommu_present(&platform_bus_type)) {
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain)
			return -ENOMEM;

		err = iommu_attach_device(host->domain, &pdev->dev);
		if (err)
			goto fail_free_domain;

		geometry = &host->domain->geometry;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order,
				 geometry->aperture_start >> order,
				 geometry->aperture_end >> order);
		host->iova_end = geometry->aperture_end;
	}

	err = host1x_channel_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto fail_detach_device;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto fail_detach_device;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto fail_unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto fail_reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto fail_deinit_syncpt;
	}

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto fail_deinit_intr;

	return 0;

fail_deinit_intr:
	host1x_intr_deinit(host);
fail_deinit_syncpt:
	host1x_syncpt_deinit(host);
fail_reset_assert:
	reset_control_assert(host->rst);
fail_unprepare_disable:
	clk_disable_unprepare(host->clk);
fail_detach_device:
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_device(host->domain, &pdev->dev);
	}
fail_free_domain:
	if (host->domain)
		iommu_domain_free(host->domain);

	return err;
}
/*
 * ahci_probe() - probe a platform AHCI SATA controller.
 *
 * Resolves port info from the device-id table (optionally overridden by
 * platform data), maps the MMIO window, runs the optional platform init
 * hook, reads the controller capabilities, allocates an ATA host with one
 * port per implemented slot and activates it.  On any failure after the
 * init hook ran, the matching pdata->exit hook is invoked at err0.
 * Returns 0 on success or a negative errno.
 */
static int __devinit ahci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_platform_data *pdata = dev_get_platdata(dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);
	/* copy, not pointer: flags are modified per-capability below */
	struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int n_ports;
	int i;
	int rc;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	/* platform data may override the table-derived port info wholesale */
	if (pdata && pdata->ata_port_info)
		pi = *pdata->ata_port_info;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		return -ENOMEM;
	}

	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		return -ENOMEM;
	}

	/*
	 * Some platforms might need to prepare for mmio region access,
	 * which could be done in the following init call. So, the mmio
	 * region shouldn't be accessed before init (if provided) has
	 * returned successfully.
	 */
	if (pdata && pdata->init) {
		rc = pdata->init(dev, hpriv->mmio);
		if (rc)
			return rc;
	}

	/* first MMIO access: snapshot CAP/port_map with platform overrides */
	ahci_save_initial_config(dev, hpriv,
				 pdata ? pdata->force_port_map : 0,
				 pdata ? pdata->mask_port_map : 0);

	/* prepare host */
#ifndef CONFIG_TIVO
	/* Do not use NCQ for processing TiVo disk I/O */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;
#endif

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto err0;
	}

	host->private_data = hpriv;

	/* staggered spin-up forces a serial scan */
	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* per-port register block is 0x80 bytes starting at 0x100 */
		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto err0;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
			       &ahci_platform_sht);
	if (rc)
		goto err0;

	return 0;
err0:
	/* undo pdata->init(); mappings/allocs are devm-managed */
	if (pdata && pdata->exit)
		pdata->exit(dev);
	return rc;
}
static int rk3036_tve_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct resource *res; const struct of_device_id *match; int i; int val = 0; match = of_match_node(rk3036_tve_dt_ids, np); if (!match) return PTR_ERR(match); rk3036_tve = devm_kzalloc(&pdev->dev, sizeof(struct rk3036_tve), GFP_KERNEL); if (!rk3036_tve) { dev_err(&pdev->dev, "rk3036 tv encoder device kmalloc fail!"); return -ENOMEM; } if (of_property_read_u32(np, "test_mode", &val)) rk3036_tve->test_mode = 0; else rk3036_tve->test_mode = val; if (!strcmp(match->compatible, "rockchip,rk3036-tve")) { rk3036_tve->soctype = SOC_RK3036; rk3036_tve->inputformat = INPUT_FORMAT_RGB; } else if (!strcmp(match->compatible, "rockchip,rk312x-tve")) { rk3036_tve->soctype = SOC_RK312X; rk3036_tve->inputformat = INPUT_FORMAT_YUV; } else { dev_err(&pdev->dev, "It is not a valid tv encoder!"); kfree(rk3036_tve); return -ENOMEM; } platform_set_drvdata(pdev, rk3036_tve); rk3036_tve->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); rk3036_tve->reg_phy_base = res->start; rk3036_tve->len = resource_size(res); rk3036_tve->regbase = ioremap(res->start, rk3036_tve->len); if (IS_ERR(rk3036_tve->regbase)) { dev_err(&pdev->dev, "rk3036 tv encoder device map registers failed!"); return PTR_ERR(rk3036_tve->regbase); } INIT_LIST_HEAD(&(rk3036_tve->modelist)); for (i = 0; i < ARRAY_SIZE(rk3036_cvbs_mode); i++) fb_add_videomode(&rk3036_cvbs_mode[i], &(rk3036_tve->modelist)); if (cvbsformat >= 0) { rk3036_tve->mode = (struct fb_videomode *)&rk3036_cvbs_mode[cvbsformat]; rk3036_tve->enable = 1; tve_switch_fb(rk3036_tve->mode, 1); } else { rk3036_tve->mode = (struct fb_videomode *)&rk3036_cvbs_mode[1]; } rk3036_tve->ddev = rk_display_device_register(&display_cvbs, &pdev->dev, NULL); rk_display_device_enable(rk3036_tve->ddev); fb_register_client(&tve_fb_notifier); cvbsformat = -1; dev_info(&pdev->dev, "%s tv encoder probe ok\n", match->compatible); return 0; }
/*
 * tegra_usb_phy_open() - allocate and initialise a Tegra USB PHY instance.
 *
 * Copies the platform data, maps the PHY register window, acquires the
 * avdd_usb regulator and clocks, then does mode-specific setup (device
 * mode: VBUS PMU IRQ or controller clock; host mode: VBUS regulator).
 * Returns the phy on success or an ERR_PTR on failure.
 *
 * NOTE(review): this definition is truncated in the visible chunk — it
 * continues past the final "else" below.
 */
struct tegra_usb_phy *tegra_usb_phy_open(struct platform_device *pdev)
{
	struct tegra_usb_phy *phy;
	struct tegra_usb_platform_data *pdata;
	struct resource *res;
	int err;
	int plat_data_size = sizeof(struct tegra_usb_platform_data);

	DBG("%s(%d) inst:[%d]\n", __func__, __LINE__, pdev->id);

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "inst:[%d] Platform data missing\n",
			pdev->id);
		err = -EINVAL;
		goto fail_inval;
	}

	phy = devm_kzalloc(&pdev->dev, sizeof(struct tegra_usb_phy),
			   GFP_KERNEL);
	if (!phy) {
		ERR("inst:[%d] malloc usb phy failed\n", pdev->id);
		err = -ENOMEM;
		goto fail_nomem;
	}

	phy->pdata = devm_kzalloc(&pdev->dev, plat_data_size, GFP_KERNEL);
	if (!phy->pdata) {
		ERR("inst:[%d] malloc usb phy pdata failed\n", pdev->id);
		devm_kfree(&pdev->dev, phy);
		err = -ENOMEM;
		goto fail_nomem;
	}

	/* take a private copy so later pdata changes don't affect us */
	memcpy(phy->pdata, pdata, plat_data_size);
	phy->pdev = pdev;
	phy->inst = pdev->id;

	print_usb_plat_data_info(phy);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ERR("inst:[%d] failed to get I/O memory\n", phy->inst);
		err = -ENXIO;
		goto fail_io;
	}

	phy->regs = ioremap(res->start, resource_size(res));
	if (!phy->regs) {
		ERR("inst:[%d] Failed to remap I/O memory\n", phy->inst);
		err = -ENOMEM;
		goto fail_io;
	}

	phy->vdd_reg = regulator_get(&pdev->dev, "avdd_usb");
	if (IS_ERR_OR_NULL(phy->vdd_reg)) {
		ERR("inst:[%d] couldn't get regulator avdd_usb: %ld\n",
			phy->inst, PTR_ERR(phy->vdd_reg));
		/* NOTE(review): vdd_reg is set to NULL *before* err is
		 * derived from it, so PTR_ERR() yields 0 and the caller may
		 * see a bogus success code — looks like a bug; confirm. */
		phy->vdd_reg = NULL;
		err = PTR_ERR(phy->vdd_reg);
		goto fail_io;
	}

	err = tegra_usb_phy_get_clocks(phy);
	if (err) {
		ERR("inst:[%d] Failed to init clocks\n", phy->inst);
		goto fail_clk;
	}

	if (phy->pdata->op_mode == TEGRA_USB_OPMODE_DEVICE) {
		if (phy->pdata->u_data.dev.vbus_pmu_irq) {
			/* VBUS sensing comes from the PMU via an IRQ */
			err = request_threaded_irq(
					phy->pdata->u_data.dev.vbus_pmu_irq,
					NULL, usb_phy_dev_vbus_pmu_irq_thr,
					IRQF_SHARED, "usb_pmu_vbus_irq", phy);
			if (err) {
				ERR("inst:[%d] Failed to register IRQ\n",
					phy->inst);
				goto fail_init;
			}
		} else {
			clk_enable(phy->ctrlr_clk);
		}
	} else {
		/* host mode: VBUS is driven through a named regulator */
		if (phy->pdata->u_data.host.vbus_reg) {
			phy->vbus_reg = regulator_get(NULL,
					phy->pdata->u_data.host.vbus_reg);
			if (WARN_ON(IS_ERR_OR_NULL(phy->vbus_reg))) {
				ERR("failed to get regulator vdd_vbus_usb: %ld,\
 instance : %d\n", PTR_ERR(phy->vbus_reg), phy->inst);
				err = PTR_ERR(phy->vbus_reg);
				goto fail_init;
			}
		} else {
static int t7l66xb_probe(struct platform_device *dev) { struct t7l66xb_platform_data *pdata = dev->dev.platform_data; struct t7l66xb *t7l66xb; struct resource *iomem, *rscr; int ret; if (pdata == NULL) return -EINVAL; iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!iomem) return -EINVAL; t7l66xb = kzalloc(sizeof *t7l66xb, GFP_KERNEL); if (!t7l66xb) return -ENOMEM; spin_lock_init(&t7l66xb->lock); platform_set_drvdata(dev, t7l66xb); ret = platform_get_irq(dev, 0); if (ret >= 0) t7l66xb->irq = ret; else goto err_noirq; t7l66xb->irq_base = pdata->irq_base; t7l66xb->clk32k = clk_get(&dev->dev, "CLK_CK32K"); if (IS_ERR(t7l66xb->clk32k)) { ret = PTR_ERR(t7l66xb->clk32k); goto err_clk32k_get; } t7l66xb->clk48m = clk_get(&dev->dev, "CLK_CK48M"); if (IS_ERR(t7l66xb->clk48m)) { ret = PTR_ERR(t7l66xb->clk48m); goto err_clk48m_get; } rscr = &t7l66xb->rscr; rscr->name = "t7l66xb-core"; rscr->start = iomem->start; rscr->end = iomem->start + 0xff; rscr->flags = IORESOURCE_MEM; ret = request_resource(iomem, rscr); if (ret) goto err_request_scr; t7l66xb->scr = ioremap(rscr->start, resource_size(rscr)); if (!t7l66xb->scr) { ret = -ENOMEM; goto err_ioremap; } clk_enable(t7l66xb->clk48m); if (pdata && pdata->enable) pdata->enable(dev); /* Mask all interrupts */ tmio_iowrite8(0xbf, t7l66xb->scr + SCR_IMR); printk(KERN_INFO "%s rev %d @ 0x%08lx, irq %d\n", dev->name, tmio_ioread8(t7l66xb->scr + SCR_REVID), (unsigned long)iomem->start, t7l66xb->irq); t7l66xb_attach_irq(dev); t7l66xb_cells[T7L66XB_CELL_NAND].platform_data = pdata->nand_data; t7l66xb_cells[T7L66XB_CELL_NAND].pdata_size = sizeof(*pdata->nand_data); ret = mfd_add_devices(&dev->dev, dev->id, t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells), iomem, t7l66xb->irq_base); if (!ret) return 0; t7l66xb_detach_irq(dev); iounmap(t7l66xb->scr); err_ioremap: release_resource(&t7l66xb->rscr); err_request_scr: clk_put(t7l66xb->clk48m); err_clk48m_get: clk_put(t7l66xb->clk32k); err_clk32k_get: err_noirq: kfree(t7l66xb); return ret; }
/*
 * rk29_wdt_probe() - probe the RK29 watchdog timer.
 *
 * Maps the watchdog MMIO region, optionally installs the feed-by-interrupt
 * IRQ handler (CONFIG_RK29_FEED_DOG_BY_INTE), enables the "wdt" clock,
 * programs the heartbeat and registers the watchdog misc device.  When
 * tmr_atboot is set the watchdog is started immediately; otherwise it is
 * stopped in case the bootloader left it running.  Returns 0 or a
 * negative errno.
 */
static int __devinit rk29_wdt_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev;
	int started = 0;
	int ret;
	int size;

	dev = &pdev->dev;
	wdt_dev = &pdev->dev;

	/* get the memory region for the watchdog timer */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(dev, "no memory resource specified\n");
		return -ENOENT;
	}

	size = resource_size(res);
	wdt_mem = request_mem_region(res->start, size, pdev->name);
	if (wdt_mem == NULL) {
		dev_err(dev, "failed to get memory region\n");
		/* BUGFIX: was "goto err_req", which called
		 * release_resource(NULL) and oopsed; nothing to unwind yet */
		return -ENOENT;
	}

	wdt_base = ioremap(res->start, size);
	if (wdt_base == NULL) {
		dev_err(dev, "failed to ioremap() region\n");
		ret = -EINVAL;
		goto err_req;
	}

	DBG("probe: mapped wdt_base=%p\n", wdt_base);

#ifdef CONFIG_RK29_FEED_DOG_BY_INTE
	wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (wdt_irq == NULL) {
		dev_err(dev, "no irq resource specified\n");
		ret = -ENOENT;
		goto err_map;
	}

	ret = request_irq(wdt_irq->start, rk29_wdt_irq_handler, 0,
			  pdev->name, pdev);
	if (ret != 0) {
		dev_err(dev, "failed to install irq (%d)\n", ret);
		goto err_map;
	}
#endif

	wdt_clock = clk_get(&pdev->dev, "wdt");
	if (IS_ERR(wdt_clock)) {
		dev_err(dev, "failed to find watchdog clock source\n");
		ret = PTR_ERR(wdt_clock);
		goto err_irq;
	}

	clk_enable(wdt_clock);

	rk29_wdt_set_heartbeat(tmr_margin);

	ret = misc_register(&rk29_wdt_miscdev);
	if (ret) {
		dev_err(dev, "cannot register miscdev on minor=%d (%d)\n",
			WATCHDOG_MINOR, ret);
		goto err_clk;
	}
	printk("watchdog misc directory:%s\n", rk29_wdt_miscdev.nodename);

	if (tmr_atboot && started == 0) {
		dev_info(dev, "starting watchdog timer\n");
		rk29_wdt_start();
	} else if (!tmr_atboot) {
		/* if we're not enabling the watchdog, then ensure it is
		 * disabled if it has been left running from the bootloader
		 * or other source */
		rk29_wdt_stop();
	}

	return 0;

 err_clk:
	clk_disable(wdt_clock);
	clk_put(wdt_clock);

 err_irq:
	/* BUGFIX: free_irq() must only run when the IRQ path above was
	 * compiled in and taken; without the config, wdt_irq was never set
	 * and dereferencing it here crashed */
#ifdef CONFIG_RK29_FEED_DOG_BY_INTE
	free_irq(wdt_irq->start, pdev);

 err_map:
#endif
	iounmap(wdt_base);

 err_req:
	release_resource(wdt_mem);
	kfree(wdt_mem);

	return ret;
}
static int sdhci_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct sdhci_host *host; struct resource *iomem; struct spear_sdhci *sdhci; int ret; iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) { ret = -ENOMEM; dev_dbg(&pdev->dev, "memory resource not defined\n"); goto err; } if (!devm_request_mem_region(&pdev->dev, iomem->start, resource_size(iomem), "spear-sdhci")) { ret = -EBUSY; dev_dbg(&pdev->dev, "cannot request region\n"); goto err; } sdhci = devm_kzalloc(&pdev->dev, sizeof(*sdhci), GFP_KERNEL); if (!sdhci) { ret = -ENOMEM; dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n"); goto err; } /* clk enable */ sdhci->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(sdhci->clk)) { ret = PTR_ERR(sdhci->clk); dev_dbg(&pdev->dev, "Error getting clock\n"); goto err; } ret = clk_prepare_enable(sdhci->clk); if (ret) { dev_dbg(&pdev->dev, "Error enabling clock\n"); goto put_clk; } ret = clk_set_rate(sdhci->clk, 50000000); if (ret) dev_dbg(&pdev->dev, "Error setting desired clk, clk=%lu\n", clk_get_rate(sdhci->clk)); if (np) { sdhci->data = sdhci_probe_config_dt(pdev); if (IS_ERR(sdhci->data)) { dev_err(&pdev->dev, "DT: Failed to get pdata\n"); return -ENODEV; } } else { sdhci->data = dev_get_platdata(&pdev->dev); } pdev->dev.platform_data = sdhci; if (pdev->dev.parent) host = sdhci_alloc_host(pdev->dev.parent, 0); else host = sdhci_alloc_host(&pdev->dev, 0); if (IS_ERR(host)) { ret = PTR_ERR(host); dev_dbg(&pdev->dev, "error allocating host\n"); goto disable_clk; } host->hw_name = "sdhci"; host->ops = &sdhci_pltfm_ops; host->irq = platform_get_irq(pdev, 0); host->quirks = SDHCI_QUIRK_BROKEN_ADMA; host->ioaddr = devm_ioremap(&pdev->dev, iomem->start, resource_size(iomem)); if (!host->ioaddr) { ret = -ENOMEM; dev_dbg(&pdev->dev, "failed to remap registers\n"); goto free_host; } ret = sdhci_add_host(host); if (ret) { dev_dbg(&pdev->dev, "error adding host\n"); goto free_host; } platform_set_drvdata(pdev, host); /* * 
It is optional to use GPIOs for sdhci Power control & sdhci card * interrupt detection. If sdhci->data is NULL, then use original sdhci * lines otherwise GPIO lines. * If GPIO is selected for power control, then power should be disabled * after card removal and should be enabled when card insertion * interrupt occurs */ if (!sdhci->data) return 0; if (sdhci->data->card_power_gpio >= 0) { int val = 0; ret = devm_gpio_request(&pdev->dev, sdhci->data->card_power_gpio, "sdhci"); if (ret < 0) { dev_dbg(&pdev->dev, "gpio request fail: %d\n", sdhci->data->card_power_gpio); goto set_drvdata; } if (sdhci->data->power_always_enb) val = sdhci->data->power_active_high; else val = !sdhci->data->power_active_high; ret = gpio_direction_output(sdhci->data->card_power_gpio, val); if (ret) { dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", sdhci->data->card_power_gpio); goto set_drvdata; } } if (sdhci->data->card_int_gpio >= 0) { ret = devm_gpio_request(&pdev->dev, sdhci->data->card_int_gpio, "sdhci"); if (ret < 0) { dev_dbg(&pdev->dev, "gpio request fail: %d\n", sdhci->data->card_int_gpio); goto set_drvdata; } ret = gpio_direction_input(sdhci->data->card_int_gpio); if (ret) { dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", sdhci->data->card_int_gpio); goto set_drvdata; } ret = devm_request_irq(&pdev->dev, gpio_to_irq(sdhci->data->card_int_gpio), sdhci_gpio_irq, IRQF_TRIGGER_LOW, mmc_hostname(host->mmc), pdev); if (ret) { dev_dbg(&pdev->dev, "gpio request irq fail: %d\n", sdhci->data->card_int_gpio); goto set_drvdata; } } return 0; set_drvdata: platform_set_drvdata(pdev, NULL); sdhci_remove_host(host, 1); free_host: sdhci_free_host(host); disable_clk: clk_disable_unprepare(sdhci->clk); put_clk: clk_put(sdhci->clk); err: dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret); return ret; }
static int s3c_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; struct resource *res; int ret; pr_debug("%s: probe=%p\n", __func__, pdev); /* find the IRQs */ s3c_rtc_tickno = platform_get_irq(pdev, 1); if (s3c_rtc_tickno < 0) { dev_err(&pdev->dev, "no irq for rtc tick\n"); return -ENOENT; } s3c_rtc_alarmno = platform_get_irq(pdev, 0); if (s3c_rtc_alarmno < 0) { dev_err(&pdev->dev, "no irq for alarm\n"); return -ENOENT; } pr_debug("s3c2410_rtc: tick irq %d, alarm irq %d\n", s3c_rtc_tickno, s3c_rtc_alarmno); /* get the memory region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "failed to get memory region resource\n"); return -ENOENT; } s3c_rtc_mem = request_mem_region(res->start, res->end-res->start+1, pdev->name); if (s3c_rtc_mem == NULL) { dev_err(&pdev->dev, "failed to reserve memory region\n"); ret = -ENOENT; goto err_nores; } s3c_rtc_base = ioremap(res->start, res->end - res->start + 1); if (s3c_rtc_base == NULL) { dev_err(&pdev->dev, "failed ioremap()\n"); ret = -EINVAL; goto err_nomap; } /* check to see if everything is setup correctly */ s3c_rtc_enable(pdev, 1); pr_debug("s3c2410_rtc: RTCCON=%02x\n", readb(s3c_rtc_base + S3C2410_RTCCON)); s3c_rtc_setfreq(s3c_rtc_freq); /* register RTC and exit */ rtc = rtc_device_register("s3c", &pdev->dev, &s3c_rtcops, THIS_MODULE); if (IS_ERR(rtc)) { dev_err(&pdev->dev, "cannot attach rtc\n"); ret = PTR_ERR(rtc); goto err_nortc; } rtc->max_user_freq = 128; platform_set_drvdata(pdev, rtc); return 0; err_nortc: s3c_rtc_enable(pdev, 0); iounmap(s3c_rtc_base); err_nomap: release_resource(s3c_rtc_mem); err_nores: return ret; }
static int __init pasic3_probe(struct platform_device *pdev) { struct pasic3_platform_data *pdata = pdev->dev.platform_data; struct device *dev = &pdev->dev; struct pasic3_data *asic; struct resource *r; int ret; int irq = 0; r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (r) { ds1wm_resources[1].flags = IORESOURCE_IRQ | (r->flags & (IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE)); irq = r->start; } r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (r) { ds1wm_resources[1].flags = IORESOURCE_IRQ | (r->flags & (IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE)); irq = r->start; } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) return -ENXIO; if (!request_mem_region(r->start, resource_size(r), "pasic3")) return -EBUSY; asic = kzalloc(sizeof(struct pasic3_data), GFP_KERNEL); if (!asic) return -ENOMEM; platform_set_drvdata(pdev, asic); asic->mapping = ioremap(r->start, resource_size(r)); if (!asic->mapping) { dev_err(dev, "couldn't ioremap PASIC3\n"); kfree(asic); return -ENOMEM; } /* calculate bus shift from mem resource */ asic->bus_shift = (resource_size(r) - 5) >> 3; if (pdata && pdata->clock_rate) { ds1wm_pdata.clock_rate = pdata->clock_rate; /* the first 5 PASIC3 registers control the DS1WM */ ds1wm_resources[0].end = (5 << asic->bus_shift) - 1; ds1wm_cell.platform_data = &ds1wm_cell; ds1wm_cell.data_size = sizeof(ds1wm_cell); ret = mfd_add_devices(&pdev->dev, pdev->id, &ds1wm_cell, 1, r, irq); if (ret < 0) dev_warn(dev, "failed to register DS1WM\n"); } if (pdata && pdata->led_pdata) { led_cell.driver_data = pdata->led_pdata; led_cell.platform_data = &led_cell; led_cell.data_size = sizeof(ds1wm_cell); ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0); if (ret < 0) dev_warn(dev, "failed to register LED device\n"); } return 0; }