/*
 * Probe one GIC instance described by a device tree node and register
 * it with the host IRQ subsystem.
 *
 * @node:    device tree node of this GIC (must be non-NULL)
 * @parent:  node of the parent interrupt controller, or NULL if this
 *           is the root GIC
 * @eoimode: passed through to gic_init_bases() (EOI handling mode)
 *
 * Returns VMM_OK on success or a VMM_Exxx error code.
 */
static int __init gic_devtree_init(struct vmm_devtree_node *node,
				   struct vmm_devtree_node *parent,
				   bool eoimode)
{
	int rc;
	u32 irq, irq_start = 0;
	physical_size_t cpu_sz;
	virtual_addr_t cpu_base;
	virtual_addr_t cpu2_base;
	virtual_addr_t dist_base;

	if (WARN_ON(!node)) {
		return VMM_ENODEV;
	}

	/* Map distributor registers (reg entry 0).
	 * NOTE(review): on failure we only WARN and continue with an
	 * unassigned dist_base — presumably mapping never fails on
	 * supported boards, but confirm this is intentional. */
	rc = vmm_devtree_request_regmap(node, &dist_base, 0, "GIC Dist");
	WARN(rc, "unable to map gic dist registers\n");

	/* Map CPU interface registers (reg entry 1); same WARN-only policy */
	rc = vmm_devtree_request_regmap(node, &cpu_base, 1, "GIC CPU");
	WARN(rc, "unable to map gic cpu registers\n");

	/* Optional second CPU interface window (reg entry 4). If it is
	 * not described in the device tree, derive its address from the
	 * size of the primary CPU interface region: a 128KiB region puts
	 * the alias at +0x10000, an 8KiB region at +0x1000, otherwise
	 * there is no second window (0). */
	rc = vmm_devtree_request_regmap(node, &cpu2_base, 4, "GIC CPU2");
	if (rc) {
		rc = vmm_devtree_regsize(node, &cpu_sz, 1);
		if (rc) {
			return rc;
		}
		if (cpu_sz >= 0x20000) {
			cpu2_base = cpu_base + 0x10000;
		} else if (cpu_sz >= 0x2000) {
			cpu2_base = cpu_base + 0x1000;
		} else {
			cpu2_base = 0x0;
		}
	}

	/* Optional "irq_start" property: first host IRQ number assigned
	 * to this GIC; defaults to 0 when absent */
	if (vmm_devtree_read_u32(node, "irq_start", &irq_start)) {
		irq_start = 0;
	}

	rc = gic_init_bases(node, gic_cnt, eoimode, irq_start,
			    cpu_base, cpu2_base, dist_base);
	if (rc) {
		return rc;
	}

	if (parent) {
		/* Secondary GIC: cascade it into the parent controller.
		 * "parent_irq" defaults to 1020 when not specified. */
		if (vmm_devtree_read_u32(node, "parent_irq", &irq)) {
			irq = 1020;
		}
		gic_cascade_irq(gic_cnt, irq);
	} else {
		/* Root GIC: supply the active-IRQ query callback */
		vmm_host_irq_set_active_callback(gic_active_irq);
	}

	/* Count instances so the next probe uses the next GIC index */
	gic_cnt++;

	return VMM_OK;
}
static int imx_src_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *nodeid) { int ret = VMM_OK; struct vmm_devtree_node *np = dev->node; u32 val; ret = vmm_devtree_request_regmap(np, (virtual_addr_t *)&src_base, 0, "i.MX Reset Control"); if (VMM_OK != ret) { vmm_printf("Failed to retrive %s register mapping\n"); return ret; } imx_reset_controller.node = np; #ifdef CONFIG_RESET_CONTROLLER reset_controller_register(&imx_reset_controller); #endif /* CONFIG_RESET_CONTROLLER */ /* * force warm reset sources to generate cold reset * for a more reliable restart */ spin_lock(&scr_lock); val = readl_relaxed(src_base + SRC_SCR); val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE); writel_relaxed(val, src_base + SRC_SCR); spin_unlock(&scr_lock); return 0; }
/*
 * Probe a Versatile FPGA secondary interrupt controller (SIC) from its
 * device tree node. Must run on the boot CPU only.
 *
 * Returns 0 on success.
 */
static int __init fpga_init(struct vmm_devtree_node *node)
{
	int rc;
	virtual_addr_t base;
	u32 clear_mask;
	u32 valid_mask;
	u32 picen_mask;
	u32 irq_start;
	u32 parent_irq;

	BUG_ON(!vmm_smp_is_bootcpu());

	/* Map the SIC register window (reg entry 0).
	 * NOTE(review): WARN-only on failure; code continues with an
	 * unassigned base — confirm mapping cannot fail here. */
	rc = vmm_devtree_request_regmap(node, &base, 0, "Versatile SIC");
	WARN(rc, "unable to map fpga irq registers\n");

	/* Optional properties; each defaults to 0 when absent */
	if (vmm_devtree_read_u32(node, "irq_start", &irq_start)) {
		irq_start = 0;
	}
	if (vmm_devtree_read_u32(node, "clear-mask", &clear_mask)) {
		clear_mask = 0;
	}
	if (vmm_devtree_read_u32(node, "valid-mask", &valid_mask)) {
		valid_mask = 0;
	}

	/* Some chips are cascaded from a parent IRQ */
	if (vmm_devtree_irq_get(node, &parent_irq, 0)) {
		parent_irq = 0xFFFFFFFF;
	}

	fpga_irq_init((void *)base, "FPGA", irq_start, parent_irq,
		      valid_mask, node);

	/* Disable the sources named by clear-mask for both IRQ and FIQ */
	vmm_writel(clear_mask, (void *)base + IRQ_ENABLE_CLEAR);
	vmm_writel(clear_mask, (void *)base + FIQ_ENABLE_CLEAR);

	/* For VersatilePB, we have interrupts from 21 to 31 capable
	 * of being routed directly to the parent interrupt controller
	 * (i.e. VIC). This is controlled by setting PIC_ENABLEx.
	 */
	if (!vmm_devtree_read_u32(node, "picen-mask", &picen_mask)) {
		vmm_writel(picen_mask, (void *)base + PICEN_SET);
	}

	return 0;
}
/*
 * Register the BCM2835 free-running system timer as a clocksource.
 * Reads the clock frequency from the device tree, maps the timer
 * registers and registers a 32-bit clocksource reading REG_COUNTER_LO.
 *
 * Returns VMM_OK on success or a VMM_Exxx error code.
 */
static int __init bcm2835_clocksource_init(struct vmm_devtree_node *node)
{
	struct bcm2835_clocksource *bcs;
	u32 clock;
	int rc;

	/* Determine input clock frequency first */
	rc = vmm_devtree_clock_frequency(node, &clock);
	if (rc) {
		return rc;
	}

	bcs = vmm_zalloc(sizeof(struct bcm2835_clocksource));
	if (NULL == bcs) {
		return VMM_ENOMEM;
	}

	/* Map timer registers */
	rc = vmm_devtree_request_regmap(node, &bcs->base, 0,
					"BCM2835 Timer");
	if (rc) {
		goto fail_free;
	}
	bcs->system_clock = (void *)(bcs->base + REG_COUNTER_LO);

	/* Describe the clocksource */
	bcs->clksrc.name = "bcm2835_timer";
	bcs->clksrc.rating = 300;
	bcs->clksrc.read = bcm2835_clksrc_read;
	bcs->clksrc.mask = VMM_CLOCKSOURCE_MASK(32);
	vmm_clocks_calc_mult_shift(&bcs->clksrc.mult, &bcs->clksrc.shift,
				   clock, VMM_NSEC_PER_SEC, 10);
	bcs->clksrc.priv = bcs;

	/* Hand it to the clocksource framework */
	rc = vmm_clocksource_register(&bcs->clksrc);
	if (rc) {
		goto fail_unmap;
	}

	return VMM_OK;

fail_unmap:
	vmm_devtree_regunmap_release(node, bcs->base, 0);
fail_free:
	vmm_free(bcs);
	return rc;
}
/*
 * Probe the Realview system register block. Maps the registers once;
 * subsequent probes reuse the cached realview_sysreg_base.
 *
 * Returns VMM_OK on success, a regmap error, or VMM_EFAULT if no
 * base address could be obtained.
 */
static int realview_sysreg_probe(struct vmm_device *dev,
				 const struct vmm_devtree_nodeid *devid)
{
	virtual_addr_t base_va;
	int err;

	/* Already mapped by an earlier probe or early init? */
	if (realview_sysreg_base) {
		return VMM_OK;
	}

	err = vmm_devtree_request_regmap(dev->node, &base_va, 0,
					 "Realview Sysreg");
	if (err) {
		return err;
	}
	realview_sysreg_base = (void *)base_va;

	if (!realview_sysreg_base) {
		vmm_printf("%s: Failed to obtain base address!\n",
			   __func__);
		return VMM_EFAULT;
	}

	return VMM_OK;
}
/*
 * Early (pre-driver-model) mapping of the Realview system registers.
 * Finds the "arm,realview-sysreg" node and caches the mapped base in
 * realview_sysreg_base. Safe to call multiple times; no-op once mapped.
 */
void __init realview_sysreg_of_early_init(void)
{
	int err;
	virtual_addr_t base_va;
	struct vmm_devtree_node *node;

	if (realview_sysreg_base)
		return;

	node = vmm_devtree_find_compatible(NULL, NULL,
					   "arm,realview-sysreg");
	if (!node)
		return;

	err = vmm_devtree_request_regmap(node, &base_va, 0,
					 "Realview Sysreg");
	if (err) {
		/* FIX: "Faild" typo in message */
		vmm_printf("%s: Failed to map registers (err %d)\n",
			   __func__, err);
	} else {
		realview_sysreg_base = (void *)base_va;
	}

	/* FIX: drop the node reference on all paths; the original
	 * leaked it when the regmap failed. */
	vmm_devtree_dref_node(node);
}
static int spi_imx_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *devid) { virtual_addr_t vaddr = 0; struct spi_master *master; struct spi_imx_data *spi_imx; int i; int ret = VMM_OK; u32 num_cs; if (!vmm_devtree_is_available(dev->of_node)) { dev_info(dev, "device is disabled\n"); return ret; } ret = vmm_devtree_read_u32(dev->of_node, "fsl,spi-num-chipselects", &num_cs); if (ret < 0) { return ret; } master = spi_alloc_master(dev, sizeof(struct spi_imx_data) + sizeof(int) * num_cs); if (!master) { dev_err(dev, "cannot allocate master\n"); return -ENOMEM; } vmm_devdrv_set_data(dev, master); master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); master->num_chipselect = num_cs; spi_imx = spi_master_get_devdata(master); spi_imx->bitbang.master = master; for (i = 0; i < master->num_chipselect; i++) { int cs_gpio = of_get_named_gpio(dev->of_node, "cs-gpios", i); spi_imx->chipselect[i] = cs_gpio; if (!gpio_is_valid(cs_gpio)) continue; ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); if (ret) { dev_err(dev, "can't get cs gpios\n"); goto out_gpio_free; } } spi_imx->bitbang.chipselect = spi_imx_chipselect; spi_imx->bitbang.setup_transfer = spi_imx_setupxfer; spi_imx->bitbang.txrx_bufs = spi_imx_transfer; spi_imx->bitbang.master->setup = spi_imx_setup; spi_imx->bitbang.master->cleanup = spi_imx_cleanup; spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message; spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message; spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; init_completion(&spi_imx->xfer_done); spi_imx->devtype_data = devid->data; ret = vmm_devtree_request_regmap(dev->of_node, &vaddr, 0, "i.MX SPI"); if (VMM_OK != ret) { ret = PTR_ERR(spi_imx->base); goto out_gpio_free; } spi_imx->base = (void __iomem *)vaddr; master->bus_num = vmm_devtree_alias_get_id(dev->of_node, "spi"); if (0 > master->bus_num) { ret = master->bus_num; goto out_gpio_free; } spi_imx->irq = vmm_devtree_irq_parse_map(dev->of_node, 
0); if (!spi_imx->irq) { ret = VMM_ENODEV; goto out_gpio_free; } ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx); if (VMM_OK != ret) { dev_err(dev, "can't get irq%d: %d\n", spi_imx->irq, ret); goto out_gpio_free; } spi_imx->clk_ipg = clk_get(dev, "ipg"); if (IS_ERR(spi_imx->clk_ipg)) { ret = PTR_ERR(spi_imx->clk_ipg); goto out_gpio_free; } spi_imx->clk_per = clk_get(dev, "per"); if (IS_ERR(spi_imx->clk_per)) { ret = PTR_ERR(spi_imx->clk_per); goto out_gpio_free; } ret = clk_prepare_enable(spi_imx->clk_per); if (ret) goto out_gpio_free; ret = clk_prepare_enable(spi_imx->clk_ipg); if (ret) goto out_put_per; spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); spi_imx->devtype_data->reset(spi_imx); spi_imx->devtype_data->intctrl(spi_imx, 0); master->dev.of_node = dev->of_node; ret = spi_bitbang_start(&spi_imx->bitbang); if (ret) { dev_err(dev, "bitbang start failed with %d\n", ret); goto out_clk_put; } /* FIXME: This should not disable the clock, as they are still set as used by the UART */ #if 0 clk_disable(spi_imx->clk_ipg); clk_disable(spi_imx->clk_per); #endif return ret; out_clk_put: /* FIXME: This should not disable the clock, as they are still set as used by the UART */ #if 0 clk_disable_unprepare(spi_imx->clk_ipg); #endif out_put_per: /* FIXME: This should not disable the clock, as they are still set as used by the UART */ #if 0 clk_disable_unprepare(spi_imx->clk_per); #endif out_gpio_free: while (i-- > 0) { gpio_free(spi_imx->chipselect[i]); } spi_master_put(master); dev_err(dev, "probing failed\n"); return ret; }
/*
 * Probe the MXC HDMI core: map registers, initialize global state and
 * locks, bring up the isfr/iahb clocks long enough to mute HDMI
 * interrupts, then disable the clocks again until the video/audio
 * sub-drivers take over.
 *
 * Returns VMM_OK on success or an error code.
 */
static int mxc_hdmi_core_probe(struct vmm_device *dev,
			       const struct vmm_devtree_nodeid *nid)
{
	struct mxc_hdmi_data *hdmi_data = NULL;
	unsigned long flags;
	virtual_addr_t base_va = 0L;
	int ret = VMM_EFAIL;

	hdmi_core_init = 0;
	hdmi_dma_running = 0;

	/* Map the HDMI core register window (reg entry 0).
	 * NOTE(review): the fail paths below never release this
	 * mapping — looks like a leak; confirm against
	 * vmm_devtree_request_regmap() semantics. */
	ret = vmm_devtree_request_regmap(dev->of_node, &base_va, 0,
					 "MXC HDMI Core");
	if (ret) {
		dev_err(dev, "failed to request regmap\n");
		goto fail;
	}

	ret = hdmi_core_get_of_property(dev);
	if (ret < 0) {
		dev_err(dev, "get hdmi of property fail\n");
		goto fail;
	}

	hdmi_data = vmm_devm_zalloc(dev, sizeof(struct mxc_hdmi_data));
	if (!hdmi_data) {
		dev_err(dev, "Couldn't allocate mxc hdmi mfd device\n");
		goto fail;
	}
	hdmi_data->dev = dev;

	/* Reset module-wide audio/video state */
	pixel_clk = NULL;
	sample_rate = 48000;
	pixel_clk_rate = 0;
	hdmi_ratio = 100;

	spin_lock_init(&irq_spinlock);
	spin_lock_init(&edid_spinlock);

	spin_lock_init(&hdmi_cable_state_lock);
	spin_lock_init(&hdmi_blank_state_lock);
	spin_lock_init(&hdmi_audio_lock);

	/* Initialize shared flags under their respective locks */
	spin_lock_irqsave(&hdmi_cable_state_lock, flags);
	hdmi_cable_state = 0;
	spin_unlock_irqrestore(&hdmi_cable_state_lock, flags);

	spin_lock_irqsave(&hdmi_blank_state_lock, flags);
	hdmi_blank_state = 0;
	spin_unlock_irqrestore(&hdmi_blank_state_lock, flags);

	spin_lock_irqsave(&hdmi_audio_lock, flags);
#if 0
	hdmi_audio_stream_playback = NULL;
#endif
	hdmi_abort_state = 0;
	spin_unlock_irqrestore(&hdmi_audio_lock, flags);

	/* Acquire and enable the isfr clock */
	isfr_clk = devm_clk_get(dev, "hdmi_isfr");
	if (IS_ERR(isfr_clk)) {
		ret = PTR_ERR(isfr_clk);
		dev_err(dev, "Unable to get HDMI isfr clk: %d\n", ret);
		goto fail;
	}
	ret = clk_prepare_enable(isfr_clk);
	if (ret < 0) {
		dev_err(dev, "Cannot enable HDMI clock: %d\n", ret);
		goto eclke;
	}
	pr_debug("%s isfr_clk:%lu\n", __func__, clk_get_rate(isfr_clk));

	/* Acquire and enable the iahb clock */
	iahb_clk = devm_clk_get(dev, "hdmi_iahb");
	if (IS_ERR(iahb_clk)) {
		ret = PTR_ERR(iahb_clk);
		dev_err(dev, "Unable to get HDMI iahb clk: %d\n", ret);
		goto eclkg2;
	}
	ret = clk_prepare_enable(iahb_clk);
	if (ret < 0) {
		dev_err(dev, "Cannot enable HDMI clock: %d\n", ret);
		goto eclke2;
	}

	hdmi_data->reg_base = (void *)base_va;
	hdmi_base = hdmi_data->reg_base;

	/* Mute all HDMI interrupt sources before sub-drivers attach */
	initialize_hdmi_ih_mutes();

	/* Disable HDMI clocks until video/audio sub-drivers are
	   initialized */
	clk_disable_unprepare(isfr_clk);
	clk_disable_unprepare(iahb_clk);

	/* Replace platform data coming in with a local struct */
	vmm_devdrv_set_data(dev, hdmi_data);

	return ret;

	/* Error unwind: each label falls through into the next so a
	 * later failure releases everything acquired earlier. */
eclke2:
	clk_put(iahb_clk);
eclkg2:
	clk_disable_unprepare(isfr_clk);
eclke:
	clk_put(isfr_clk);
fail:
	/* NOTE(review): hdmi_data came from vmm_devm_zalloc (device-
	 * managed); freeing it with vmm_free here may be a double
	 * free when the device is released — confirm. */
	if (hdmi_data)
		vmm_free(hdmi_data);
	return ret;
}
/*
 * Probe an 8250-compatible UART: determine whether it is I/O-port or
 * memory mapped, read optional DT properties (reg-shift, reg-io-width,
 * baudrate), hook the IRQ and create a serial port.
 *
 * Returns VMM_OK on success or a VMM_Exxx error code.
 */
static int uart_8250_driver_probe(struct vmm_device *dev,
				  const struct vmm_devtree_nodeid *devid)
{
	int rc;
	struct uart_8250_port *port;
	physical_addr_t ioport;
	const char *aval;

	port = vmm_zalloc(sizeof(struct uart_8250_port));
	if(!port) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}

	/* Address type attribute selects I/O-port vs memory-mapped
	 * register access; absent means memory mapped */
	if (vmm_devtree_read_string(dev->node,
				    VMM_DEVTREE_ADDRESS_TYPE_ATTR_NAME,
				    &aval)) {
		aval = NULL;
	}
	if (aval && !strcmp(aval, VMM_DEVTREE_ADDRESS_TYPE_VAL_IO)) {
		port->use_ioport = TRUE;
	} else {
		port->use_ioport = FALSE;
	}

	if (port->use_ioport) {
		/* I/O ports need no mapping; just the port number */
		rc = vmm_devtree_regaddr(dev->node, &ioport, 0);
		if (rc) {
			goto free_port;
		}
		port->base = ioport;
	} else {
		rc = vmm_devtree_request_regmap(dev->node, &port->base, 0,
						"UART 8250");
		if (rc) {
			goto free_port;
		}
	}

	/* Optional register layout and line settings with defaults */
	if (vmm_devtree_read_u32(dev->node, "reg-shift",
				 &port->reg_shift)) {
		port->reg_shift = 0;
	}
	if (vmm_devtree_read_u32(dev->node, "reg-io-width",
				 &port->reg_width)) {
		port->reg_width = 1;
	}
	if (vmm_devtree_read_u32(dev->node, "baudrate",
				 &port->baudrate)) {
		port->baudrate = 115200;
	}
	rc = vmm_devtree_clock_frequency(dev->node, &port->input_clock);
	if (rc) {
		goto free_reg;
	}

	/* Call low-level init function
	 * Note: low-level init will make sure that
	 * interrupts are disabled in IER register.
	 */
	uart_8250_lowlevel_init(port);

	/* Setup interrupt handler */
	port->irq = vmm_devtree_irq_parse_map(dev->node, 0);
	if (!port->irq) {
		rc = VMM_ENODEV;
		goto free_reg;
	}
	if ((rc = vmm_host_irq_register(port->irq, dev->name,
					uart_8250_irq_handler, port))) {
		goto free_reg;
	}

	/* Create Serial Port */
	port->p = serial_create(dev, 256, uart_8250_tx, port);
	if (VMM_IS_ERR_OR_NULL(port->p)) {
		rc = VMM_PTR_ERR(port->p);
		goto free_irq;
	}

	/* Save port pointer */
	dev->priv = port;

	/* Unmask Rx interrupt */
	port->ier |= (UART_IER_RLSI | UART_IER_RDI);
	uart_8250_out(port, UART_IER_OFFSET, port->ier);

	return VMM_OK;

free_irq:
	vmm_host_irq_unregister(port->irq, port);
free_reg:
	/* Only memory-mapped registers were actually mapped */
	if (!port->use_ioport) {
		vmm_devtree_regunmap_release(dev->node, port->base, 0);
	}
free_port:
	vmm_free(port);
free_nothing:
	return rc;
}
static int s3c_rtc_driver_probe(struct vmm_device *pdev, const struct vmm_devtree_nodeid *devid) { u32 alarmno, tickno; struct rtc_time rtc_tm; int ret = VMM_OK, tmp, rc; /* find the IRQs */ rc = vmm_devtree_irq_get(pdev->node, &alarmno, 0); if (rc) { rc = VMM_EFAIL; return rc; } s3c_rtc_alarmno = alarmno; rc = vmm_devtree_irq_get(pdev->node, &tickno, 1); if (rc) { rc = VMM_EFAIL; return rc; } s3c_rtc_tickno = tickno; /* get the memory region */ rc = vmm_devtree_request_regmap(pdev->node, (virtual_addr_t *)&s3c_rtc_base, 0, "S3C RTC"); if (rc) { dev_err(pdev, "failed ioremap()\n"); ret = rc; goto err_nomap; } rtc_clk = clk_get(pdev, "rtc"); if (rtc_clk == NULL) { dev_err(pdev, "failed to find rtc clock source\n"); ret = -ENODEV; goto err_clk; } clk_enable(rtc_clk); /* check to see if everything is setup correctly */ s3c_rtc_enable(pdev, 1); //device_init_wakeup(pdev, 1); /* register RTC and exit */ s3c_rtcops.dev.parent = pdev; rc = rtc_device_register(&s3c_rtcops); if (rc) { dev_err(pdev, "cannot attach rtc\n"); ret = rc; goto err_nortc; } s3c_rtc_cpu_type = (enum s3c_cpu_type)devid->data; /* Check RTC Time */ s3c_rtc_gettime(NULL, &rtc_tm); if (!rtc_valid_tm(&rtc_tm)) { dev_warn(pdev, "warning: invalid RTC value so initializing it\n"); rtc_tm.tm_year = 100; rtc_tm.tm_mon = 0; rtc_tm.tm_mday = 1; rtc_tm.tm_hour = 0; rtc_tm.tm_min = 0; rtc_tm.tm_sec = 0; s3c_rtc_settime(NULL, &rtc_tm); } if (s3c_rtc_cpu_type != TYPE_S3C2410) max_user_freq = 32768; else max_user_freq = 128; if (s3c_rtc_cpu_type == TYPE_S3C2416 || s3c_rtc_cpu_type == TYPE_S3C2443) { tmp = readw(s3c_rtc_base + S3C2410_RTCCON); tmp |= S3C2443_RTCCON_TICSEL; writew(tmp, s3c_rtc_base + S3C2410_RTCCON); } pdev->priv = &s3c_rtcops; s3c_rtc_setfreq(&s3c_rtcops, 1); if ((rc = vmm_host_irq_register(s3c_rtc_alarmno, "s3c_rtc_alarm", s3c_rtc_alarmirq, &s3c_rtcops))) { dev_err(pdev, "IRQ%d error %d\n", s3c_rtc_alarmno, rc); goto err_alarm_irq; } if ((rc = vmm_host_irq_register(s3c_rtc_tickno, "s3c_rtc_tick", 
s3c_rtc_tickirq, &s3c_rtcops))) { dev_err(pdev, "IRQ%d error %d\n", s3c_rtc_tickno, rc); goto err_tick_irq; } clk_disable(rtc_clk); return 0; err_tick_irq: vmm_host_irq_unregister(s3c_rtc_alarmno, &s3c_rtcops); err_alarm_irq: pdev->priv = NULL; rtc_device_unregister(&s3c_rtcops); err_nortc: s3c_rtc_enable(pdev, 0); clk_disable(rtc_clk); clk_put(rtc_clk); err_clk: vmm_devtree_regunmap_release(pdev->node, (virtual_addr_t)s3c_rtc_base, 0); err_nomap: return ret; }
/*
 * Probe a Samsung (S3C64xx-style) UART: map registers, mask all
 * interrupts except Rx, hook the IRQ and create a serial port.
 *
 * Returns VMM_OK on success or a VMM_Exxx error code.
 */
static int samsung_driver_probe(struct vmm_device *dev,
				const struct vmm_devtree_nodeid *devid)
{
	u32 ucon;
	int rc = VMM_EFAIL;
	struct samsung_port *port = NULL;

	port = vmm_zalloc(sizeof(struct samsung_port));
	if (!port) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}

	rc = vmm_devtree_request_regmap(dev->of_node, &port->base, 0,
					"Samsung UART");
	if (rc) {
		goto free_port;
	}

	/* Make sure all interrupts except Rx are masked.
	 * UINTM bits set = masked, so write the complement of the
	 * Rx mask bit. */
	port->mask = S3C64XX_UINTM_RXD_MSK;
	port->mask = ~port->mask;
	vmm_out_le16((void *)(port->base + S3C64XX_UINTM), port->mask);

	if (vmm_devtree_read_u32(dev->of_node, "baudrate",
				 &port->baudrate)) {
		port->baudrate = 115200;
	}
	rc = vmm_devtree_clock_frequency(dev->of_node, &port->input_clock);
	if (rc) {
		goto free_reg;
	}

	/* Setup interrupt handler */
	port->irq = vmm_devtree_irq_parse_map(dev->of_node, 0);
	if (!port->irq) {
		rc = VMM_ENODEV;
		goto free_reg;
	}
	if ((rc = vmm_host_irq_register(port->irq, dev->name,
					samsung_irq_handler, port))) {
		goto free_reg;
	}

	/* Call low-level init function */
	samsung_lowlevel_init(port->base, port->baudrate,
			      port->input_clock);

	/* Create Serial Port */
	port->p = serial_create(dev, 256, samsung_tx, port);
	if (VMM_IS_ERR_OR_NULL(port->p)) {
		rc = VMM_PTR_ERR(port->p);
		goto free_irq;
	}

	/* Save port pointer */
	dev->priv = port;

	/* Unmask RX interrupt: switch UCON to interrupt-driven Rx */
	ucon = vmm_in_le32((void *)(port->base + S3C2410_UCON));
	ucon |= S3C2410_UCON_RXIRQMODE;
	vmm_out_le32((void *)(port->base + S3C2410_UCON), ucon);

	return VMM_OK;

free_irq:
	vmm_host_irq_unregister(port->irq, port);
free_reg:
	vmm_devtree_regunmap_release(dev->of_node, port->base, 0);
free_port:
	vmm_free(port);
free_nothing:
	return rc;
}
/*
 * Probe a PL180 MMCI controller: map registers, hook the command IRQ
 * (and optional second PIO IRQ), load per-variant parameters from the
 * nodeid match data, program initial power/clock, and register the
 * MMC host.
 *
 * Returns VMM_OK on success or a VMM_Exxx error code.
 */
static int mmci_driver_probe(struct vmm_device *dev,
			     const struct vmm_devtree_nodeid *devid)
{
	int rc;
	u32 sdi;
	virtual_addr_t base;
	physical_addr_t basepa;
	struct mmc_host *mmc;
	struct mmci_host *host;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), dev);
	if (!mmc) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}
	host = mmc_priv(mmc);

	rc = vmm_devtree_request_regmap(dev->node, &base, 0, "PL180 MMCI");
	if (rc) {
		goto free_host;
	}
	host->base = (struct sdi_registers *)base;

	/* IRQ 0: command/status interrupt (mandatory) */
	host->irq0 = vmm_devtree_irq_parse_map(dev->node, 0);
	if (!host->irq0) {
		rc = VMM_ENODEV;
		goto free_reg;
	}
	if ((rc = vmm_host_irq_register(host->irq0, dev->name,
					mmci_cmd_irq_handler, mmc))) {
		goto free_reg;
	}

	/* IRQ 1: optional PIO interrupt; absence means single-IRQ mode */
	host->irq1 = vmm_devtree_irq_parse_map(dev->node, 1);
	if (host->irq1) {
		if ((rc = vmm_host_irq_register(host->irq1, dev->name,
						mmci_pio_irq_handler, mmc))) {
			goto free_irq0;
		}
		host->singleirq = 0;
	} else {
		host->singleirq = 1;
	}

	/* Retrive matching data: a u32 array of per-variant settings
	 * [0]=power init, [1]=clock divider init, [2]=voltages,
	 * [3]=caps, [4]=input clock, [5]=min clock, [6]=max clock,
	 * [7]=max blocks, [8]=version-2 flag */
	host->pwr_init = ((const u32 *)devid->data)[0];
	host->clkdiv_init = ((const u32 *)devid->data)[1];
	host->voltages = ((const u32 *)devid->data)[2];
	host->caps = ((const u32 *)devid->data)[3];
	host->clock_in = ((const u32 *)devid->data)[4];
	host->clock_min = ((const u32 *)devid->data)[5];
	host->clock_max = ((const u32 *)devid->data)[6];
	host->b_max = ((const u32 *)devid->data)[7];
	host->version2 = ((const u32 *)devid->data)[8];

	/* Initialize power and clock divider */
	vmm_writel(host->pwr_init, &host->base->power);
	vmm_writel(host->clkdiv_init, &host->base->clock);
	vmm_udelay(CLK_CHANGE_DELAY);

	/* Disable interrupts */
	sdi = vmm_readl(&host->base->mask0) & ~SDI_MASK0_MASK;
	vmm_writel(sdi, &host->base->mask0);

	/* Setup mmc host configuration */
	mmc->caps = host->caps;
	mmc->voltages = host->voltages;
	mmc->f_min = host->clock_min;
	mmc->f_max = host->clock_max;
	mmc->b_max = host->b_max;

	/* Setup mmc host operations */
	mmc->ops.send_cmd = mmci_request;
	mmc->ops.set_ios = mmci_set_ios;
	mmc->ops.init_card = mmci_init_card;
	mmc->ops.get_cd = NULL;
	mmc->ops.get_wp = NULL;

	rc = mmc_add_host(mmc);
	if (rc) {
		goto free_irq1;
	}

	dev->priv = mmc;

	/* Physical address used for the banner only; result ignored */
	vmm_devtree_regaddr(dev->node, &basepa, 0);

	vmm_printf("%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		   dev->name, amba_part(dev), amba_manf(dev),
		   amba_rev(dev), (unsigned long long)basepa,
		   host->irq0, host->irq1);

	return VMM_OK;

free_irq1:
	/* falls through to free_irq0 to release the command IRQ too */
	if (!host->singleirq) {
		vmm_host_irq_unregister(host->irq1, mmc);
	}
free_irq0:
	vmm_host_irq_unregister(host->irq0, mmc);
free_reg:
	vmm_devtree_regunmap_release(dev->node,
				     (virtual_addr_t)host->base, 0);
free_host:
	mmc_free_host(mmc);
free_nothing:
	return rc;
}
static int omap_uart_driver_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *devid) { int rc; struct omap_uart_port *port; port = vmm_zalloc(sizeof(struct omap_uart_port)); if (!port) { rc = VMM_ENOMEM; goto free_nothing; } rc = vmm_devtree_request_regmap(dev->of_node, &port->base, 0, "omap-uart"); if (rc) { goto free_port; } if (vmm_devtree_read_u32(dev->of_node, "reg-shift", &port->reg_shift)) { port->reg_shift = 0; } if (vmm_devtree_read_u32(dev->of_node, "baudrate", &port->baudrate)) { port->baudrate = 115200; } rc = vmm_devtree_clock_frequency(dev->of_node, &port->input_clock); if (rc) { goto free_reg; } omap_uart_startup_configure(port); port->irq = vmm_devtree_irq_parse_map(dev->of_node, 0); if (!port->irq) { rc = VMM_ENODEV; goto free_reg; } if ((rc = vmm_host_irq_register(port->irq, dev->name, omap_uart_irq_handler, port))) { goto free_reg; } /* Create Serial Port */ port->p = serial_create(dev, 256, omap_uart_tx, port); if (VMM_IS_ERR_OR_NULL(port->p)) { rc = VMM_PTR_ERR(port->p); goto free_irq; } /* Save port pointer */ dev->priv = port; /* Unmask Rx interrupt */ port->ier |= (UART_IER_RDI | UART_IER_RLSI); omap_serial_out(port, UART_IER, port->ier); return VMM_OK; free_irq: vmm_host_irq_unregister(port->irq, port); free_reg: vmm_devtree_regunmap_release(dev->of_node, port->base, 0); free_port: vmm_free(port); free_nothing: return rc; }
/*
 * Per-CPU init of the ARM TWD (local timer) clockchip. The register
 * mapping, PPI number and frequency are discovered once (by the first
 * CPU to run this) and reused by the others. Frequency comes from the
 * TWD clock if available, otherwise it is calibrated against a
 * reference counter described by reg entry 1 and "ref-counter-freq".
 *
 * Returns VMM_OK on success or a VMM_Exxx error code.
 */
static int __cpuinit twd_clockchip_init(struct vmm_devtree_node *node)
{
	int rc;
	u32 ref_cnt_freq;
	virtual_addr_t ref_cnt_addr;
	u32 cpu = vmm_smp_processor_id();
	struct twd_clockchip *cc = &this_cpu(twd_cc);

	/* Map TWD registers once; shared across CPUs */
	if (!twd_base) {
		rc = vmm_devtree_request_regmap(node, &twd_base, 0,
						"ARM Local Timer");
		if (rc) {
			goto fail;
		}
	}

	/* Resolve the TWD PPI once */
	if (!twd_ppi_irq) {
		twd_ppi_irq = vmm_devtree_irq_parse_map(node, 0);
		if (!twd_ppi_irq) {
			rc = VMM_ENODEV;
			goto fail_regunmap;
		}
	}

	if (!twd_freq_hz) {
		/* First try to find TWD clock */
		if (!twd_clk) {
			twd_clk = of_clk_get(node, 0);
		}
		if (VMM_IS_ERR_OR_NULL(twd_clk)) {
			twd_clk = clk_get_sys("smp_twd", NULL);
		}

		if (!VMM_IS_ERR_OR_NULL(twd_clk)) {
			/* Use TWD clock to find frequency */
			rc = clk_prepare_enable(twd_clk);
			if (rc) {
				clk_put(twd_clk);
				goto fail_regunmap;
			}
			twd_freq_hz = clk_get_rate(twd_clk);
		} else {
			/* No TWD clock found hence caliberate */
			rc = vmm_devtree_regmap(node, &ref_cnt_addr, 1);
			if (rc) {
				/* FIX: the original unmapped a mapping
				 * that had just failed */
				goto fail_regunmap;
			}
			if (vmm_devtree_read_u32(node, "ref-counter-freq",
						 &ref_cnt_freq)) {
				/* FIX: the original left rc == 0 here,
				 * returning success on this failure */
				rc = VMM_ENODEV;
				vmm_devtree_regunmap(node, ref_cnt_addr, 1);
				goto fail_regunmap;
			}
			twd_caliberate_freq(twd_base, ref_cnt_addr,
					    ref_cnt_freq);
			vmm_devtree_regunmap(node, ref_cnt_addr, 1);
		}
	}

	memset(cc, 0, sizeof(struct twd_clockchip));

	vmm_sprintf(cc->name, "twd/%d", cpu);

	cc->clkchip.name = cc->name;
	cc->clkchip.hirq = twd_ppi_irq;
	cc->clkchip.rating = 350;
	cc->clkchip.cpumask = vmm_cpumask_of(cpu);
	cc->clkchip.features = VMM_CLOCKCHIP_FEAT_PERIODIC |
			       VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->clkchip.mult, &cc->clkchip.shift,
				   VMM_NSEC_PER_SEC, twd_freq_hz, 10);
	cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
	cc->clkchip.max_delta_ns =
			vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
	cc->clkchip.set_mode = &twd_clockchip_set_mode;
	cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
	cc->clkchip.priv = cc;

	/* Register interrupt handler */
	if ((rc = vmm_host_irq_register(twd_ppi_irq, "twd",
					&twd_clockchip_irq_handler, cc))) {
		goto fail_regunmap;
	}

	rc = vmm_clockchip_register(&cc->clkchip);
	if (rc) {
		goto fail_unreg_irq;
	}

	return VMM_OK;

fail_unreg_irq:
	vmm_host_irq_unregister(twd_ppi_irq, cc);
fail_regunmap:
	/* NOTE(review): this unmaps the shared twd_base even when it
	 * was mapped by an earlier CPU — pre-existing behavior kept. */
	vmm_devtree_regunmap_release(node, twd_base, 0);
fail:
	return rc;
}
/*
 * Probe an i.MX UART: map registers, set up the ipg/uart clocks (the
 * uart clock is re-rated to the DT clock-frequency), hook the IRQ,
 * create a serial port and enable the UART with Rx interrupts only.
 *
 * Returns VMM_OK on success or a VMM_Exxx error code.
 */
static int imx_driver_probe(struct vmm_device *dev,
			    const struct vmm_devtree_nodeid *devid)
{
	int rc = VMM_EFAIL;
	struct clk *clk_ipg = NULL;
	struct clk *clk_uart = NULL;
	struct imx_port *port = NULL;
	unsigned long old_rate = 0;

	port = vmm_zalloc(sizeof(struct imx_port));
	if (!port) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}

	rc = vmm_devtree_request_regmap(dev->of_node, &port->base, 0,
					"iMX UART");
	if (rc) {
		goto free_port;
	}

	if (vmm_devtree_read_u32(dev->of_node, "baudrate",
				 &port->baudrate)) {
		port->baudrate = 115200;
	}
	rc = vmm_devtree_clock_frequency(dev->of_node, &port->input_clock);
	if (rc) {
		goto free_reg;
	}

	/* Setup clocks: index 0 is the bus (ipg) clock, index 1 the
	 * UART functional clock; both are optional */
	clk_ipg = of_clk_get(dev->of_node, 0);
	clk_uart = of_clk_get(dev->of_node, 1);
	if (!VMM_IS_ERR_OR_NULL(clk_ipg)) {
		rc = clk_prepare_enable(clk_ipg);
		if (rc) {
			goto free_reg;
		}
	}
	if (!VMM_IS_ERR_OR_NULL(clk_uart)) {
		rc = clk_prepare_enable(clk_uart);
		if (rc) {
			goto clk_disable_unprepare_ipg;
		}
		/* Remember the previous rate so failure paths can
		 * restore it */
		old_rate = clk_get_rate(clk_uart);
		if (clk_set_rate(clk_uart, port->input_clock)) {
			vmm_printf("Could not set %s clock rate to %u Hz, "
				   "actual rate: %u Hz\n",
				   __clk_get_name(clk_uart),
				   port->input_clock,
				   clk_get_rate(clk_uart));
			rc = VMM_ERANGE;
			goto clk_disable_unprepare_uart;
		}
	}

	/* Register interrupt handler */
	port->irq = vmm_devtree_irq_parse_map(dev->of_node, 0);
	if (!port->irq) {
		rc = VMM_ENODEV;
		goto clk_old_rate;
	}
	if ((rc = vmm_host_irq_register(port->irq, dev->name,
					imx_irq_handler, port))) {
		goto clk_old_rate;
	}

	/* Call low-level init function */
	imx_lowlevel_init(port->base, port->baudrate, port->input_clock);

	/* Create Serial Port */
	port->p = serial_create(dev, 256, imx_tx, port);
	if (VMM_IS_ERR_OR_NULL(port->p)) {
		rc = VMM_PTR_ERR(port->p);
		goto free_irq;
	}

	/* Save port pointer */
	dev->priv = port;

	/* Unmask Rx, Mask Tx, and Enable UART */
	port->mask = vmm_readl((void *)port->base + UCR1);
	port->mask |= (UCR1_RRDYEN | UCR1_UARTEN);
	port->mask &= ~(UCR1_TRDYEN);
	vmm_writel(port->mask, (void *)port->base + UCR1);

	return rc;

	/* Error unwind: each label falls through into the next so
	 * later failures undo all earlier setup */
free_irq:
	vmm_host_irq_unregister(port->irq, port);
clk_old_rate:
	if (!VMM_IS_ERR_OR_NULL(clk_uart)) {
		if (old_rate) {
			clk_set_rate(clk_uart, old_rate);
		}
	}
clk_disable_unprepare_uart:
	if (!VMM_IS_ERR_OR_NULL(clk_uart)) {
		clk_disable_unprepare(clk_uart);
	}
clk_disable_unprepare_ipg:
	if (!VMM_IS_ERR_OR_NULL(clk_ipg)) {
		clk_disable_unprepare(clk_ipg);
	}
free_reg:
	vmm_devtree_regunmap_release(dev->of_node, port->base, 0);
free_port:
	vmm_free(port);
free_nothing:
	return rc;
}
static int amba_kmi_driver_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *devid) { struct amba_kmi_port *kmi; struct serio *io; int ret; kmi = kzalloc(sizeof(struct amba_kmi_port), GFP_KERNEL); io = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!kmi || !io) { ret = -ENOMEM; goto out; } io->id.type = SERIO_8042; io->write = amba_kmi_write; io->open = amba_kmi_open; io->close = amba_kmi_close; if (strlcpy(io->name, dev->name, sizeof(io->name)) >= sizeof(io->name)) { ret = -EOVERFLOW; goto out; } if (strlcpy(io->phys, dev->name, sizeof(io->phys)) >= sizeof(io->phys)) { ret = -EOVERFLOW; goto out; } io->port_data = kmi; io->dev.parent = dev; kmi->io = io; ret = vmm_devtree_request_regmap(dev->node, (virtual_addr_t *)&kmi->base, 0, "AMBA KMI"); if (ret) { ret = -ENOMEM; goto out; } kmi->clk = clk_get(dev, "KMIREFCLK"); if (IS_ERR(kmi->clk)) { ret = PTR_ERR(kmi->clk); goto unmap; } kmi->irq = irq_of_parse_and_map(dev->node, 0); if (!kmi->irq) { ret = -EFAIL; goto unmap; } vmm_devdrv_set_data(dev, kmi); serio_register_port(kmi->io); return VMM_OK; unmap: vmm_devtree_regunmap_release(dev->node, (virtual_addr_t)kmi->base, 0); out: if (kmi) kfree(kmi); if (io) kfree(io); return ret; }
static int sun4i_mdio_probe(struct vmm_device *pdev, const struct vmm_devtree_nodeid *devid) { struct device_node *np = pdev->node; struct mii_bus *bus; struct sun4i_mdio_data *data; int ret, i; virtual_addr_t reg_addr; bus = mdiobus_alloc_size(sizeof(*data)); if (!bus) return -ENOMEM; bus->name = "sun4i_mii_bus"; bus->read = &sun4i_mdio_read; bus->write = &sun4i_mdio_write; bus->reset = &sun4i_mdio_reset; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(pdev)); bus->parent = pdev; #if 0 bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); #endif bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!bus->irq) { ret = -ENOMEM; goto err_out_free_mdiobus; } for (i = 0; i < PHY_MAX_ADDR; i++) bus->irq[i] = PHY_POLL; data = bus->priv; #if 0 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); data->membase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(data->membase)) { ret = PTR_ERR(data->membase); goto err_out_free_mdiobus; } #endif if ((ret = vmm_devtree_request_regmap(np, ®_addr, 0, "Sun4i MDIO"))) { vmm_printf("%s: Failed to ioremap\n", __func__); return -ENOMEM; } data->membase = (void *) reg_addr; #if 0 data->regulator = devm_regulator_get(&pdev->dev, "phy"); #endif data->regulator = devm_regulator_get(pdev, "phy"); if (IS_ERR(data->regulator)) { if (PTR_ERR(data->regulator) == -EPROBE_DEFER) return -EPROBE_DEFER; dev_info(pdev, "no regulator found\n"); } else { ret = regulator_enable(data->regulator); if (ret) goto err_out_free_mdiobus; } ret = of_mdiobus_register(bus, np); if (ret < 0) goto err_out_disable_regulator; platform_set_drvdata(pdev, bus); return 0; err_out_disable_regulator: regulator_disable(data->regulator); err_out_free_mdiobus: mdiobus_free(bus); return ret; }
/* Search EMAC board, allocate space and register it */ static int emac_probe(struct vmm_device *pdev, const struct vmm_devtree_nodeid *devid) { struct device_node *np = pdev->node; struct emac_board_info *db; struct net_device *ndev; int ret = 0; const char *mac_addr; virtual_addr_t reg_addr; ndev = alloc_etherdev(sizeof(struct emac_board_info)); if (!ndev) { dev_err(pdev, "%s: could not allocate device.\n", __func__); return -ENOMEM; } strlcpy(ndev->name, pdev->name, sizeof(ndev->name)); SET_NETDEV_DEV(ndev, pdev); db = netdev_priv(ndev); memset(db, 0, sizeof(*db)); db->ndev = ndev; db->pdev = pdev; spin_lock_init(&db->lock); if ((ret = vmm_devtree_request_regmap(np, ®_addr, 0, "Sun4i EMAC"))) { vmm_printf("%s: Failed to ioreamp\n", __func__); return -ENOMEM; } db->membase = (void *) reg_addr; /* fill in parameters for net-dev structure */ ndev->base_addr = (unsigned long)db->membase; ret = vmm_devtree_irq_get(np, &ndev->irq, 0); if (ret) { vmm_printf("%s: No irq resource\n", __func__); goto out; } db->clk = clk_get(pdev, NULL); if (IS_ERR(db->clk)) goto out; clk_prepare_enable(db->clk); db->phy_node = vmm_devtree_parse_phandle(np, "phy", 0); if (!db->phy_node) { dev_err(pdev, "%s: no associated PHY\n", __func__); ret = -ENODEV; goto out; } /* Read MAC-address from DT */ mac_addr = of_get_mac_address(np); if (mac_addr) memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); /* Check if the MAC address is valid, if not get a random one */ if (!is_valid_ether_addr(ndev->dev_addr)) { eth_hw_addr_random(ndev); dev_info(pdev, "using random MAC address: "); print_mac_address_fmt(ndev->dev_addr); } db->emacrx_completed_flag = 1; emac_powerup(ndev); emac_reset(db); ether_setup(ndev); ndev->netdev_ops = &emac_netdev_ops; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); ndev->ethtool_ops = &emac_ethtool_ops; platform_set_drvdata(pdev, ndev); /* Carrier starts down, phylib will bring it up */ netif_carrier_off(ndev); ret = register_netdev(ndev); if (ret) { dev_err(pdev, "%s: 
Registering netdev failed!\n", __func__); ret = -ENODEV; goto out; } dev_info(pdev, "%s: at %p, IRQ %d MAC: ", ndev->name, db->membase, ndev->irq); print_mac_address_fmt(ndev->dev_addr); return 0; out: dev_err(pdev, "%s: not found (%d).\n", __func__, ret); free_netdev(ndev); return ret; }