static int amba_kmi_driver_remove(struct vmm_device *dev) { struct amba_kmi_port *kmi = (struct amba_kmi_port *)vmm_devdrv_get_data(dev); vmm_devdrv_set_data(dev, NULL); serio_unregister_port(kmi->io); clk_put(kmi->clk); vmm_devtree_regunmap_release(dev->node, (virtual_addr_t)kmi->base, 0); kfree(kmi); return VMM_OK; }
static int sram_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *nodeid) { void *virt_base = NULL; struct sram_dev *sram = NULL; physical_addr_t start = 0; virtual_size_t size = 0; int ret = VMM_OK; ret = vmm_devtree_regaddr(dev->of_node, &start, 0); if (VMM_OK != ret) { vmm_printf("%s: Failed to get device base\n", dev->name); return ret; } ret = vmm_devtree_regsize(dev->of_node, &size, 0); if (VMM_OK != ret) { vmm_printf("%s: Failed to get device size\n", dev->name); goto err_out; } virt_base = (void *)vmm_host_iomap(start, size); if (NULL == virt_base) { vmm_printf("%s: Failed to get remap memory\n", dev->name); ret = VMM_ENOMEM; goto err_out; } sram = vmm_devm_zalloc(dev, sizeof(*sram)); if (!sram) { vmm_printf("%s: Failed to allocate structure\n", dev->name); ret = VMM_ENOMEM; goto err_out; } sram->clk = devm_clk_get(dev, NULL); if (VMM_IS_ERR(sram->clk)) sram->clk = NULL; else clk_prepare_enable(sram->clk); sram->pool = devm_gen_pool_create(dev, SRAM_GRANULARITY_LOG); if (!sram->pool) { vmm_printf("%s: Failed to create memory pool\n", dev->name); ret = VMM_ENOMEM; } ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base, start, size); if (ret < 0) { vmm_printf("%s: Failed to add memory chunk\n", dev->name); goto err_out; } vmm_devdrv_set_data(dev, sram); vmm_printf("%s: SRAM pool: %ld KiB @ 0x%p\n", dev->name, size / 1024, virt_base); return 0; err_out: if (sram->pool) gen_pool_destroy(sram->pool); #if 0 if (sram->clk) clk_disable_unprepare(sram->clk); #endif /* 0 */ if (sram) vmm_free(sram); sram = NULL; if (virt_base) vmm_host_iounmap((virtual_addr_t)virt_base); virt_base = NULL; return ret; }
static int spi_imx_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *devid) { virtual_addr_t vaddr = 0; struct spi_master *master; struct spi_imx_data *spi_imx; int i; int ret = VMM_OK; u32 num_cs; if (!vmm_devtree_is_available(dev->of_node)) { dev_info(dev, "device is disabled\n"); return ret; } ret = vmm_devtree_read_u32(dev->of_node, "fsl,spi-num-chipselects", &num_cs); if (ret < 0) { return ret; } master = spi_alloc_master(dev, sizeof(struct spi_imx_data) + sizeof(int) * num_cs); if (!master) { dev_err(dev, "cannot allocate master\n"); return -ENOMEM; } vmm_devdrv_set_data(dev, master); master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); master->num_chipselect = num_cs; spi_imx = spi_master_get_devdata(master); spi_imx->bitbang.master = master; for (i = 0; i < master->num_chipselect; i++) { int cs_gpio = of_get_named_gpio(dev->of_node, "cs-gpios", i); spi_imx->chipselect[i] = cs_gpio; if (!gpio_is_valid(cs_gpio)) continue; ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); if (ret) { dev_err(dev, "can't get cs gpios\n"); goto out_gpio_free; } } spi_imx->bitbang.chipselect = spi_imx_chipselect; spi_imx->bitbang.setup_transfer = spi_imx_setupxfer; spi_imx->bitbang.txrx_bufs = spi_imx_transfer; spi_imx->bitbang.master->setup = spi_imx_setup; spi_imx->bitbang.master->cleanup = spi_imx_cleanup; spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message; spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message; spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; init_completion(&spi_imx->xfer_done); spi_imx->devtype_data = devid->data; ret = vmm_devtree_request_regmap(dev->of_node, &vaddr, 0, "i.MX SPI"); if (VMM_OK != ret) { ret = PTR_ERR(spi_imx->base); goto out_gpio_free; } spi_imx->base = (void __iomem *)vaddr; master->bus_num = vmm_devtree_alias_get_id(dev->of_node, "spi"); if (0 > master->bus_num) { ret = master->bus_num; goto out_gpio_free; } spi_imx->irq = vmm_devtree_irq_parse_map(dev->of_node, 
0); if (!spi_imx->irq) { ret = VMM_ENODEV; goto out_gpio_free; } ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx); if (VMM_OK != ret) { dev_err(dev, "can't get irq%d: %d\n", spi_imx->irq, ret); goto out_gpio_free; } spi_imx->clk_ipg = clk_get(dev, "ipg"); if (IS_ERR(spi_imx->clk_ipg)) { ret = PTR_ERR(spi_imx->clk_ipg); goto out_gpio_free; } spi_imx->clk_per = clk_get(dev, "per"); if (IS_ERR(spi_imx->clk_per)) { ret = PTR_ERR(spi_imx->clk_per); goto out_gpio_free; } ret = clk_prepare_enable(spi_imx->clk_per); if (ret) goto out_gpio_free; ret = clk_prepare_enable(spi_imx->clk_ipg); if (ret) goto out_put_per; spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); spi_imx->devtype_data->reset(spi_imx); spi_imx->devtype_data->intctrl(spi_imx, 0); master->dev.of_node = dev->of_node; ret = spi_bitbang_start(&spi_imx->bitbang); if (ret) { dev_err(dev, "bitbang start failed with %d\n", ret); goto out_clk_put; } /* FIXME: This should not disable the clock, as they are still set as used by the UART */ #if 0 clk_disable(spi_imx->clk_ipg); clk_disable(spi_imx->clk_per); #endif return ret; out_clk_put: /* FIXME: This should not disable the clock, as they are still set as used by the UART */ #if 0 clk_disable_unprepare(spi_imx->clk_ipg); #endif out_put_per: /* FIXME: This should not disable the clock, as they are still set as used by the UART */ #if 0 clk_disable_unprepare(spi_imx->clk_per); #endif out_gpio_free: while (i-- > 0) { gpio_free(spi_imx->chipselect[i]); } spi_master_put(master); dev_err(dev, "probing failed\n"); return ret; }
/*
 * mxc_hdmi_core_probe() - probe the i.MX HDMI core MFD device.
 *
 * Maps the HDMI core register block, reads devicetree properties,
 * allocates the per-device state, initializes the module-wide locks
 * and state variables shared with the HDMI video/audio sub-drivers,
 * then briefly enables the isfr/iahb clocks to mute all HDMI
 * interrupts before disabling them again for the sub-drivers.
 *
 * Returns 0 on success or a VMM/PTR error code on failure.
 *
 * NOTE(review): the failure paths never unmap the region obtained via
 * vmm_devtree_request_regmap(); "fail:" vmm_free()s memory obtained
 * via vmm_devm_zalloc(); and the hdmi_data allocation-failure branch
 * jumps to "fail:" with ret possibly still 0 (apparent success) —
 * all three look suspicious; confirm against the devm semantics.
 */
static int mxc_hdmi_core_probe(struct vmm_device *dev,
			       const struct vmm_devtree_nodeid *nid)
{
	struct mxc_hdmi_data *hdmi_data = NULL;
	unsigned long flags;
	virtual_addr_t base_va = 0L;
	int ret = VMM_EFAIL;

	/* Reset module-wide bring-up flags shared with sub-drivers. */
	hdmi_core_init = 0;
	hdmi_dma_running = 0;

	/* Map the HDMI core register space. */
	ret = vmm_devtree_request_regmap(dev->of_node, &base_va, 0,
					 "MXC HDMI Core");
	if (ret) {
		dev_err(dev, "failed to request regmap\n");
		goto fail;
	}

	ret = hdmi_core_get_of_property(dev);
	if (ret < 0) {
		dev_err(dev, "get hdmi of property fail\n");
		goto fail;
	}

	hdmi_data = vmm_devm_zalloc(dev, sizeof(struct mxc_hdmi_data));
	if (!hdmi_data) {
		dev_err(dev, "Couldn't allocate mxc hdmi mfd device\n");
		goto fail;
	}
	hdmi_data->dev = dev;

	/* Defaults for the module-wide audio/video state. */
	pixel_clk = NULL;
	sample_rate = 48000;
	pixel_clk_rate = 0;
	hdmi_ratio = 100;

	spin_lock_init(&irq_spinlock);
	spin_lock_init(&edid_spinlock);
	spin_lock_init(&hdmi_cable_state_lock);
	spin_lock_init(&hdmi_blank_state_lock);
	spin_lock_init(&hdmi_audio_lock);

	/* Publish initial cable/blank/audio state under their locks. */
	spin_lock_irqsave(&hdmi_cable_state_lock, flags);
	hdmi_cable_state = 0;
	spin_unlock_irqrestore(&hdmi_cable_state_lock, flags);

	spin_lock_irqsave(&hdmi_blank_state_lock, flags);
	hdmi_blank_state = 0;
	spin_unlock_irqrestore(&hdmi_blank_state_lock, flags);

	spin_lock_irqsave(&hdmi_audio_lock, flags);
#if 0
	hdmi_audio_stream_playback = NULL;
#endif
	hdmi_abort_state = 0;
	spin_unlock_irqrestore(&hdmi_audio_lock, flags);

	isfr_clk = devm_clk_get(dev, "hdmi_isfr");
	if (IS_ERR(isfr_clk)) {
		ret = PTR_ERR(isfr_clk);
		dev_err(dev, "Unable to get HDMI isfr clk: %d\n", ret);
		goto fail;
	}

	ret = clk_prepare_enable(isfr_clk);
	if (ret < 0) {
		dev_err(dev, "Cannot enable HDMI clock: %d\n", ret);
		goto eclke;
	}

	pr_debug("%s isfr_clk:%lu\n", __func__, clk_get_rate(isfr_clk));

	iahb_clk = devm_clk_get(dev, "hdmi_iahb");
	if (IS_ERR(iahb_clk)) {
		ret = PTR_ERR(iahb_clk);
		dev_err(dev, "Unable to get HDMI iahb clk: %d\n", ret);
		goto eclkg2;
	}

	ret = clk_prepare_enable(iahb_clk);
	if (ret < 0) {
		dev_err(dev, "Cannot enable HDMI clock: %d\n", ret);
		goto eclke2;
	}

	hdmi_data->reg_base = (void *)base_va;
	hdmi_base = hdmi_data->reg_base;

	initialize_hdmi_ih_mutes();

	/* Disable HDMI clocks until video/audio sub-drivers are
	 * initialized */
	clk_disable_unprepare(isfr_clk);
	clk_disable_unprepare(iahb_clk);

	/* Replace platform data coming in with a local struct */
	vmm_devdrv_set_data(dev, hdmi_data);

	return ret;

	/* Error unwinding via fall-through labels: eclke2 releases
	 * iahb_clk, then falls into eclkg2/eclke which disable and
	 * release isfr_clk.
	 * NOTE(review): clk_put() on clocks obtained with
	 * devm_clk_get() looks double-release-prone — confirm. */
eclke2:
	clk_put(iahb_clk);
eclkg2:
	clk_disable_unprepare(isfr_clk);
eclke:
	clk_put(isfr_clk);
fail:
	if (hdmi_data)
		vmm_free(hdmi_data);
	return ret;
}
static int amba_kmi_driver_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *devid) { struct amba_kmi_port *kmi; struct serio *io; int ret; kmi = kzalloc(sizeof(struct amba_kmi_port), GFP_KERNEL); io = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!kmi || !io) { ret = -ENOMEM; goto out; } io->id.type = SERIO_8042; io->write = amba_kmi_write; io->open = amba_kmi_open; io->close = amba_kmi_close; if (strlcpy(io->name, dev->name, sizeof(io->name)) >= sizeof(io->name)) { ret = -EOVERFLOW; goto out; } if (strlcpy(io->phys, dev->name, sizeof(io->phys)) >= sizeof(io->phys)) { ret = -EOVERFLOW; goto out; } io->port_data = kmi; io->dev.parent = dev; kmi->io = io; ret = vmm_devtree_request_regmap(dev->node, (virtual_addr_t *)&kmi->base, 0, "AMBA KMI"); if (ret) { ret = -ENOMEM; goto out; } kmi->clk = clk_get(dev, "KMIREFCLK"); if (IS_ERR(kmi->clk)) { ret = PTR_ERR(kmi->clk); goto unmap; } kmi->irq = irq_of_parse_and_map(dev->node, 0); if (!kmi->irq) { ret = -EFAIL; goto unmap; } vmm_devdrv_set_data(dev, kmi); serio_register_port(kmi->io); return VMM_OK; unmap: vmm_devtree_regunmap_release(dev->node, (virtual_addr_t)kmi->base, 0); out: if (kmi) kfree(kmi); if (io) kfree(io); return ret; }