/* Program the SPI baud-rate divider register. */
static inline void spi_set_clk(struct rockchip_spi *rs, u16 div)
{
	void __iomem *baudr = rs->regs + ROCKCHIP_SPI_BAUDR;

	writel_relaxed(div, baudr);
}
/*
 * Register the MPS2 timer as a free-running clocksource and as the
 * sched_clock source.  The tick rate comes from the optional
 * "clock-frequency" DT property, or else from the node's first clock.
 * Returns 0 on success or a negative errno.
 */
static int __init mps2_clocksource_init(struct device_node *np)
{
	void __iomem *base;
	struct clk *clk = NULL;
	u32 rate;
	int ret;
	const char *name = "mps2-clksrc";

	/* Prefer an explicit "clock-frequency" property over a clk lookup. */
	ret = of_property_read_u32(np, "clock-frequency", &rate);
	if (ret) {
		clk = of_clk_get(np, 0);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			pr_err("failed to get clock for clocksource: %d\n", ret);
			goto out;
		}

		ret = clk_prepare_enable(clk);
		if (ret) {
			pr_err("failed to enable clock for clocksource: %d\n", ret);
			goto out_clk_put;
		}

		rate = clk_get_rate(clk);
	}

	base = of_iomap(np, 0);
	if (!base) {
		ret = -EADDRNOTAVAIL;
		pr_err("failed to map register for clocksource: %d\n", ret);
		goto out_clk_disable;
	}

	/* Ensure timer is disabled */
	writel_relaxed(0, base + TIMER_CTRL);

	/* ... and set it up as free-running clocksource */
	writel_relaxed(0xffffffff, base + TIMER_VALUE);
	writel_relaxed(0xffffffff, base + TIMER_RELOAD);
	writel_relaxed(TIMER_CTRL_ENABLE, base + TIMER_CTRL);

	/* Down-counting timer, hence the _down mmio reader. */
	ret = clocksource_mmio_init(base + TIMER_VALUE, name,
				    rate, 200, 32,
				    clocksource_mmio_readl_down);
	if (ret) {
		pr_err("failed to init clocksource: %d\n", ret);
		goto out_iounmap;
	}

	sched_clock_base = base;
	sched_clock_register(mps2_sched_read, 32, rate);

	return 0;

out_iounmap:
	iounmap(base);
out_clk_disable:
	/* clk_{disable, unprepare, put}() can handle NULL as a parameter */
	clk_disable_unprepare(clk);
out_clk_put:
	clk_put(clk);
out:
	return ret;
}
/* The following operations are needed by Pixiu */
static inline void cti_enable_access(void)
{
	/* CoreSight unlock key written to the CTI lock-access register. */
	writel_relaxed(0xC5ACCE55, CTI_LOCK);
}
/* * We define our own outer_disable() to avoid L2 flush upon LP2 entry. * Since the Tegra kernel will always be in single core mode when * L2 is being disabled, we can omit the locking. Since we are not * accessing the spinlock we also avoid the problem of the spinlock * storage getting out of sync. */ static inline void tegra_l2x0_disable(void) { void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000; writel_relaxed(0, p + L2X0_CTRL); dsb(); }
/* Store the boot argument for a secondary CPU in its SRC_GPR slot. */
void imx_set_cpu_arg(int cpu, u32 arg)
{
	int hwcpu = cpu_logical_map(cpu);

	/* Each CPU owns a pair of GPRs; the argument lives in the second. */
	writel_relaxed(arg, src_base + SRC_GPR1 + hwcpu * 8 + 4);
}
static int footswitch_disable(struct regulator_dev *rdev) { struct footswitch *fs = rdev_get_drvdata(rdev); struct fs_clk_data *clock; uint32_t regval, rc = 0; regval = readl_relaxed(fs->gfs_ctl_reg); if ((regval & ENABLE_BIT) == 0) return 0; rc = setup_clocks(fs); if (rc) return rc; clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN); if (fs->bus_port0) { rc = msm_bus_axi_porthalt(fs->bus_port0); if (rc) { pr_err("%s port 0 halt failed.\n", fs->desc.name); goto err; } } if (fs->bus_port1) { rc = msm_bus_axi_porthalt(fs->bus_port1); if (rc) { pr_err("%s port 1 halt failed.\n", fs->desc.name); goto err_port2_halt; } } for (clock = fs->clk_data; clock->clk; clock++) ; for (clock--; clock >= fs->clk_data; clock--) clk_reset(clock->clk, CLK_RESET_ASSERT); udelay(RESET_DELAY_US); restore_clocks(fs); regval |= CLAMP_BIT; writel_relaxed(regval, fs->gfs_ctl_reg); regval &= ~ENABLE_BIT; writel_relaxed(regval, fs->gfs_ctl_reg); fs->is_enabled = false; return 0; err_port2_halt: msm_bus_axi_portunhalt(fs->bus_port0); err: clk_set_flags(fs->core_clk, CLKFLAG_RETAIN); restore_clocks(fs); return rc; }
/* Debug-logged MMIO write helper. */
void msm_io_w(u32 data, void __iomem *addr)
{
	CDBG("%s: %08x %08x\n", __func__, (int) (addr), (data));
	writel_relaxed(data, addr);
}
/* Disable the SRAM-resident I2C controller by clearing its control reg. */
void __sramfunc sram_i2c_disenable(void)
{
	writel_relaxed(0, SRAM_I2C_ADDRBASE + I2C_CON);
}
/* Write access to Registers */
static inline void cdns_wdt_writereg(void __iomem *offset, u32 val)
{
	writel_relaxed(val, offset);
}
/* Unmask one interrupt line in its 32-bit combiner group. */
static void combiner_unmask_irq(struct irq_data *data)
{
	u32 bit = 1 << (data->hwirq % 32);

	writel_relaxed(bit, combiner_base(data) + COMBINER_ENABLE_SET);
}
/*
 * Kick off an SRAM I2C write: enable the controller in mode 0 with
 * last-byte ACK, raise START and make sure STOP is clear.
 */
void __sramfunc sram_i2c_write_enable(void)
{
	u32 con = I2C_CON_EN | I2C_CON_MOD(0) | I2C_CON_LASTACK | I2C_CON_START;

	con &= ~I2C_CON_STOP;
	writel_relaxed(con, SRAM_I2C_ADDRBASE + I2C_CON);
}
/*
 * Initialize the OMAP4 PL310 L2 cache controller: pick the aux-control
 * and POR values for the detected silicon revision, program them via
 * the secure monitor (GP vs EMU/HS devices use different services),
 * enable the controller and hand it to the common l2x0 driver.
 * Returns 0 on success, -ENODEV on non-OMAP4 parts.
 */
static int __init omap_l2_cache_init(void)
{
	u32 l2x0_auxctrl;
	u32 l2x0_por;
	u32 l2x0_lockdown;

	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENODEV;

	/* Static mapping, never released */
	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
	BUG_ON(!l2cache_base);

	/* ES1.0 uses fixed values and skips the POR/lockdown programming. */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		l2x0_auxctrl = OMAP443X_L2X0_AUXCTL_VALUE_ES1;
		goto skip_auxctlr;
	}

	if (cpu_is_omap446x()) {
		if (omap_rev() == OMAP4460_REV_ES1_0) {
			l2x0_auxctrl = OMAP446X_L2X0_AUXCTL_VALUE_ES1;
			l2x0_por = OMAP446X_PL310_POR_ES1;
			/* Non-zero lockdown: stability WA, see below. */
			l2x0_lockdown = 0xa5a5;
		} else {
			l2x0_auxctrl = OMAP446X_L2X0_AUXCTL_VALUE;
			l2x0_por = OMAP446X_PL310_POR;
			l2x0_lockdown = 0;
		}
	} else {
		l2x0_auxctrl = OMAP443X_L2X0_AUXCTL_VALUE;
		l2x0_por = OMAP443X_PL310_POR;
		l2x0_lockdown = 0;
	}

	/* Set POR through PPA service only in EMU/HS devices */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap4_secure_dispatcher(PPA_SERVICE_PL310_POR, 0x7, 1,
				l2x0_por, 0, 0, 0);
	} else if (omap_rev() > OMAP4430_REV_ES2_1)
		omap_smc1(0x113, l2x0_por);

	/*
	 * FIXME : Temporary WA for the OMAP4460 stability
	 * issue. For OMAP4460 the effective L2X0 Size = 512 KB
	 * with this WA.
	 */
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x900);
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x908);
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x904);
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x90C);

	/*
	 * Double Linefill, BRESP enabled, $I and $D prefetch ON,
	 * Share-override = 1, NS lockdown enabled
	 */
	omap_smc1(0x109, l2x0_auxctrl);

skip_auxctlr:
	/* Enable PL310 L2 Cache controller */
	omap_smc1(0x102, 0x1);

	/*
	 * 32KB way size, 16-way associativity,
	 * parity disabled
	 */
	l2x0_init(l2cache_base, l2x0_auxctrl, 0xd0000fff);

	return 0;
}
/* Write one DMIC register at the given offset. */
static inline void omap_dmic_write(struct omap_dmic *dmic, u16 reg, u32 val)
{
	void __iomem *addr = dmic->io_base + reg;

	writel_relaxed(val, addr);
}
static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val) { writel_relaxed(val, dev->base + reg); }
/*
 * Shut the modem subsystem down: hold the ARM core in reset, reset and
 * gate the modem AHB and core clocks, and clear the modem's clock/PLL
 * votes.  Barriers ensure each reset/gating stage completes before the
 * next begins.  Always returns 0.
 */
static int modem_pil_shutdown(struct pil_desc *pil)
{
	u32 reg;
	const struct modem_data *drv = dev_get_drvdata(pil->dev);

	/* Put modem into reset */
	writel_relaxed(0x1, drv->cbase + MARM_RESET);
	mb();

	/* Put modem AHB0,1,2 clocks into reset */
	writel_relaxed(BIT(0) | BIT(1), drv->cbase + MAHB0_SFAB_PORT_RESET);
	writel_relaxed(BIT(7), drv->cbase + MAHB1_CLK_CTL);
	writel_relaxed(BIT(7), drv->cbase + MAHB2_CLK_CTL);
	mb();

	/*
	 * Disable all of the marm_clk branches, cxo sourced marm branches,
	 * and sleep clock branches
	 */
	writel_relaxed(0x0, drv->cbase + MARM_CLK_CTL);
	writel_relaxed(0x0, drv->cbase + MAHB0_CLK_CTL);
	writel_relaxed(0x0, drv->cbase + SFAB_MSS_S_HCLK_CTL);
	writel_relaxed(0x0, drv->cbase + MSS_MODEM_CXO_CLK_CTL);
	writel_relaxed(0x0, drv->cbase + MSS_SLP_CLK_CTL);
	writel_relaxed(0x0, drv->cbase + MSS_MARM_SYS_REF_CLK_CTL);

	/* Disable marm_clk */
	reg = readl_relaxed(drv->cbase + MARM_CLK_SRC_CTL);
	reg &= ~0x2;
	writel_relaxed(reg, drv->cbase + MARM_CLK_SRC_CTL);

	/* Clear modem's votes for ahb clocks */
	writel_relaxed(0x0, drv->cbase + MARM_CLK_BRANCH_ENA_VOTE);

	/* Clear modem's votes for PLLs */
	writel_relaxed(0x0, drv->cbase + PLL_ENA_MARM);

	return 0;
}
/* Write one Cadence SPI controller register at the given offset. */
static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
{
	void __iomem *addr = xspi->regs + offset;

	writel_relaxed(val, addr);
}
/*
 * Bring the modem out of reset: set up PLL8 and the modem clock tree,
 * release the AHB resets, program the boot vector and finally deassert
 * the ARM core reset.  The clock enable/reset ordering here follows the
 * hardware bring-up sequence and must not be reordered.
 * Always returns 0 (the status polls spin until the hardware responds).
 */
static int modem_reset(struct pil_desc *pil)
{
	u32 reg;
	const struct modem_data *drv = dev_get_drvdata(pil->dev);
	phys_addr_t start_addr = pil_get_entry_addr(pil);

	/* Put modem AHB0,1,2 clocks into reset */
	writel_relaxed(BIT(0) | BIT(1), drv->cbase + MAHB0_SFAB_PORT_RESET);
	writel_relaxed(BIT(7), drv->cbase + MAHB1_CLK_CTL);
	writel_relaxed(BIT(7), drv->cbase + MAHB2_CLK_CTL);

	/* Vote for pll8 on behalf of the modem */
	reg = readl_relaxed(drv->cbase + PLL_ENA_MARM);
	reg |= BIT(8);
	writel_relaxed(reg, drv->cbase + PLL_ENA_MARM);

	/* Wait for PLL8 to enable */
	while (!(readl_relaxed(drv->cbase + PLL8_STATUS) & BIT(16)))
		cpu_relax();

	/* Set MAHB1 divider to Div-5 to run MAHB1,2 and sfab at 79.8 Mhz*/
	writel_relaxed(0x4, drv->cbase + MAHB1_NS);

	/* Vote for modem AHB1 and 2 clocks to be on on behalf of the modem */
	reg = readl_relaxed(drv->cbase + MARM_CLK_BRANCH_ENA_VOTE);
	reg |= BIT(0) | BIT(1);
	writel_relaxed(reg, drv->cbase + MARM_CLK_BRANCH_ENA_VOTE);

	/*
	 * Source marm_clk off of PLL8, ping-ponging between the two
	 * source-select banks so the switch is glitch-free.
	 */
	reg = readl_relaxed(drv->cbase + MARM_CLK_SRC_CTL);
	if ((reg & 0x1) == 0) {
		writel_relaxed(0x3, drv->cbase + MARM_CLK_SRC1_NS);
		reg |= 0x1;
	} else {
		writel_relaxed(0x3, drv->cbase + MARM_CLK_SRC0_NS);
		reg &= ~0x1;
	}
	writel_relaxed(reg | 0x2, drv->cbase + MARM_CLK_SRC_CTL);

	/*
	 * Force core on and periph on signals to remain active during halt
	 * for marm_clk and mahb2_clk
	 */
	writel_relaxed(0x6F, drv->cbase + MARM_CLK_FS);
	writel_relaxed(0x6F, drv->cbase + MAHB2_CLK_FS);

	/*
	 * Enable all of the marm_clk branches, cxo sourced marm branches,
	 * and sleep clock branches
	 */
	writel_relaxed(0x10, drv->cbase + MARM_CLK_CTL);
	writel_relaxed(0x10, drv->cbase + MAHB0_CLK_CTL);
	writel_relaxed(0x10, drv->cbase + SFAB_MSS_S_HCLK_CTL);
	writel_relaxed(0x10, drv->cbase + MSS_MODEM_CXO_CLK_CTL);
	writel_relaxed(0x10, drv->cbase + MSS_SLP_CLK_CTL);
	writel_relaxed(0x10, drv->cbase + MSS_MARM_SYS_REF_CLK_CTL);

	/* Wait for above clocks to be turned on */
	while (readl_relaxed(drv->cbase + CLK_HALT_MSS_SMPSS_MISC_STATE) &
			(BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(4) | BIT(6)))
		cpu_relax();

	/* Take MAHB0,1,2 clocks out of reset */
	writel_relaxed(0x0, drv->cbase + MAHB2_CLK_CTL);
	writel_relaxed(0x0, drv->cbase + MAHB1_CLK_CTL);
	writel_relaxed(0x0, drv->cbase + MAHB0_SFAB_PORT_RESET);
	mb();

	/*
	 * Setup exception vector table base address.
	 * NOTE(review): this uses drv->base while every other access in this
	 * function uses drv->cbase - looks intentional (separate register
	 * window for boot control) but worth confirming against the header.
	 */
	writel_relaxed(start_addr | 0x1, drv->base + MARM_BOOT_CONTROL);

	/* Wait for vector table to be setup */
	mb();

	/* Bring modem out of reset */
	writel_relaxed(0x0, drv->cbase + MARM_RESET);

	return 0;
}
/*
 * Power the MIPI DSI panel on or off (@on non-zero = on).  On first call
 * it lazily requests the power/backlight GPIOs and regulators; on every
 * call it toggles the board-specific power rails, pulses the LCDC bridge
 * reset and switches the regulator set.  Returns 0 on success or a
 * negative errno; on first-time init failure the acquired resources are
 * released and the lazy-init flag stays clear so a retry re-runs init.
 */
static int mipi_dsi_panel_msm_power(int on)
{
	int rc = 0;
	uint32_t lcdc_reset_cfg;

	/* I2C-controlled GPIO Expander -init of the GPIOs very late */
	if (unlikely(!dsi_gpio_initialized)) {
		pmapp_disp_backlight_init();

		rc = gpio_request(GPIO_DISPLAY_PWR_EN, "gpio_disp_pwr");
		if (rc < 0) {
			pr_err("failed to request gpio_disp_pwr\n");
			return rc;
		}

		/* Only SURF boards drive power/backlight enables directly. */
		if (machine_is_msm7x27a_surf() || machine_is_msm7625a_surf() ||
				machine_is_msm8625_surf()) {
			rc = gpio_direction_output(GPIO_DISPLAY_PWR_EN, 1);
			if (rc < 0) {
				pr_err("failed to enable display pwr\n");
				goto fail_gpio1;
			}

			rc = gpio_request(GPIO_BACKLIGHT_EN, "gpio_bkl_en");
			if (rc < 0) {
				pr_err("failed to request gpio_bkl_en\n");
				goto fail_gpio1;
			}

			rc = gpio_direction_output(GPIO_BACKLIGHT_EN, 1);
			if (rc < 0) {
				pr_err("failed to enable backlight\n");
				goto fail_gpio2;
			}
		}

		rc = regulator_bulk_get(NULL, ARRAY_SIZE(regs_dsi), regs_dsi);
		if (rc) {
			pr_err("%s: could not get regulators: %d\n",
					__func__, rc);
			goto fail_gpio2;
		}

		rc = regulator_bulk_set_voltage(ARRAY_SIZE(regs_dsi),
						regs_dsi);
		if (rc) {
			pr_err("%s: could not set voltages: %d\n",
					__func__, rc);
			goto fail_vreg;
		}

		if (pmapp_disp_backlight_set_brightness(100))
			pr_err("backlight set brightness failed\n");

		dsi_gpio_initialized = 1;
	}

	if (machine_is_msm7x27a_surf() || machine_is_msm7625a_surf() ||
			machine_is_msm8625_surf()) {
		gpio_set_value_cansleep(GPIO_DISPLAY_PWR_EN, on);
		gpio_set_value_cansleep(GPIO_BACKLIGHT_EN, on);
	} else if (machine_is_msm7x27a_ffa() || machine_is_msm7625a_ffa() ||
			machine_is_msm8625_ffa()) {
		if (on) {
			/* This line drives an active low pin on FFA */
			rc = gpio_direction_output(GPIO_DISPLAY_PWR_EN, !on);
			if (rc < 0)
				pr_err("failed to set direction for "
					"display pwr\n");
		} else {
			gpio_set_value_cansleep(GPIO_DISPLAY_PWR_EN, !on);
			rc = gpio_direction_input(GPIO_DISPLAY_PWR_EN);
			if (rc < 0)
				pr_err("failed to set direction for "
					"display pwr\n");
		}
	}

	if (on) {
		gpio_set_value_cansleep(GPIO_LCDC_BRDG_PD, 0);

		/* SURF resets the bridge via a register, others via GPIO. */
		if (machine_is_msm7x27a_surf() ||
				machine_is_msm7625a_surf() ||
				machine_is_msm8625_surf()) {
			lcdc_reset_cfg = readl_relaxed(lcdc_reset_ptr);
			rmb();
			lcdc_reset_cfg &= ~1;

			writel_relaxed(lcdc_reset_cfg, lcdc_reset_ptr);
			msleep(20);
			wmb();
			lcdc_reset_cfg |= 1;
			writel_relaxed(lcdc_reset_cfg, lcdc_reset_ptr);
			msleep(20);
		} else {
			gpio_set_value_cansleep(GPIO_LCDC_BRDG_RESET_N, 0);
			msleep(20);
			gpio_set_value_cansleep(GPIO_LCDC_BRDG_RESET_N, 1);
			msleep(20);
		}
	} else {
		gpio_set_value_cansleep(GPIO_LCDC_BRDG_PD, 1);
	}

	rc = on ? regulator_bulk_enable(ARRAY_SIZE(regs_dsi), regs_dsi) :
		  regulator_bulk_disable(ARRAY_SIZE(regs_dsi), regs_dsi);
	if (rc)
		pr_err("%s: could not %sable regulators: %d\n",
				__func__, on ? "en" : "dis", rc);

	return rc;

fail_vreg:
	regulator_bulk_free(ARRAY_SIZE(regs_dsi), regs_dsi);
fail_gpio2:
	gpio_free(GPIO_BACKLIGHT_EN);
fail_gpio1:
	gpio_free(GPIO_DISPLAY_PWR_EN);
	dsi_gpio_initialized = 0;
	return rc;
}
static int gfx2d_footswitch_enable(struct regulator_dev *rdev) { struct footswitch *fs = rdev_get_drvdata(rdev); struct fs_clk_data *clock; uint32_t regval, rc = 0; mutex_lock(&claim_lock); fs->is_claimed = true; mutex_unlock(&claim_lock); regval = readl_relaxed(fs->gfs_ctl_reg); if ((regval & (ENABLE_BIT | CLAMP_BIT)) == ENABLE_BIT) return 0; rc = setup_clocks(fs); if (rc) return rc; if (fs->bus_port0) { rc = msm_bus_axi_portunhalt(fs->bus_port0); if (rc) { pr_err("%s port 0 unhalt failed.\n", fs->desc.name); goto err; } } clk_disable_unprepare(fs->core_clk); for (clock = fs->clk_data; clock->clk; clock++) ; for (clock--; clock >= fs->clk_data; clock--) clk_reset(clock->clk, CLK_RESET_ASSERT); udelay(RESET_DELAY_US); regval |= ENABLE_BIT; writel_relaxed(regval, fs->gfs_ctl_reg); mb(); udelay(1); regval &= ~CLAMP_BIT; writel_relaxed(regval, fs->gfs_ctl_reg); for (clock = fs->clk_data; clock->clk; clock++) clk_reset(clock->clk, CLK_RESET_DEASSERT); udelay(RESET_DELAY_US); clk_prepare_enable(fs->core_clk); clk_set_flags(fs->core_clk, CLKFLAG_RETAIN); restore_clocks(fs); fs->is_enabled = true; return 0; err: restore_clocks(fs); return rc; }
/* Write one register in an MDP pipe's register block. */
static inline void mdss_mdp_pipe_write(struct mdss_mdp_pipe *pipe,
				       u32 reg, u32 val)
{
	void __iomem *addr = pipe->base + reg;

	writel_relaxed(val, addr);
}
/* SSPHY Initialization */ static int msm_ssphy_qmp_init(struct usb_phy *uphy) { struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp, phy); int ret; unsigned init_timeout_usec = INIT_MAX_TIME_USEC; u32 revid; const struct qmp_reg_val *reg = NULL, *misc = NULL; dev_dbg(uphy->dev, "Initializing QMP phy\n"); if (phy->emulation) return 0; if (!phy->clk_enabled) { ret = msm_ssphy_qmp_init_clocks(phy); if (ret) { dev_err(uphy->dev, "failed to init clocks %d\n", ret); return ret; } } /* Rev ID is made up each of the LSBs of REVISION_ID[0-3] */ revid = (readl_relaxed(phy->base + PCIE_USB3_PHY_REVISION_ID3) & 0xFF) << 24; revid |= (readl_relaxed(phy->base + PCIE_USB3_PHY_REVISION_ID2) & 0xFF) << 16; revid |= (readl_relaxed(phy->base + PCIE_USB3_PHY_REVISION_ID1) & 0xFF) << 8; revid |= readl_relaxed(phy->base + PCIE_USB3_PHY_REVISION_ID0) & 0xFF; switch (revid) { case 0x10000000: reg = qmp_settings_rev0; misc = qmp_settings_rev0_misc; break; case 0x10000001: reg = qmp_settings_rev1; misc = qmp_settings_rev1_misc; break; default: dev_err(uphy->dev, "Unknown revid 0x%x, cannot initialize PHY\n", revid); return -ENODEV; } /* Configure AHB2PHY for one wait state reads/writes */ if (phy->ahb2phy) writel_relaxed(0x11, phy->ahb2phy + PERIPH_SS_AHB2PHY_TOP_CFG); writel_relaxed(0x01, phy->base + PCIE_USB3_PHY_POWER_DOWN_CONTROL); /* Main configuration */ if (configure_phy_regs(uphy, reg)) { dev_err(uphy->dev, "Failed the main PHY configuration\n"); return ret; } /* Feature specific configurations */ if (phy->override_pll_cal) { reg = qmp_override_pll; if (configure_phy_regs(uphy, reg)) { dev_err(uphy->dev, "Failed the PHY PLL override configuration\n"); return ret; } } if (phy->misc_config) { configure_phy_regs(uphy, misc); dev_err(uphy->dev, "Failed the misc PHY configuration\n"); return ret; } writel_relaxed(0x00, phy->base + PCIE_USB3_PHY_SW_RESET); writel_relaxed(0x03, phy->base + PCIE_USB3_PHY_START); if (!phy->switch_pipe_clk_src) /* this clock wasn't enabled 
before, enable it now */ clk_prepare_enable(phy->pipe_clk); /* Wait for PHY initialization to be done */ do { if (readl_relaxed(phy->base + PCIE_USB3_PHY_PCS_STATUS) & PHYSTATUS) usleep(1); else break; } while (--init_timeout_usec); if (!init_timeout_usec) { dev_err(uphy->dev, "QMP PHY initialization timeout\n"); return -EBUSY; }; /* * After PHY initilization above, the PHY is generating * the usb3_pipe_clk in 125MHz. Therefore now we can (if needed) * switch the gcc_usb3_pipe_clk to 125MHz as well, so the * gcc_usb3_pipe_clk is sourced now from the usb3_pipe3_clk * instead of from the xo clock. */ if (phy->switch_pipe_clk_src) clk_set_rate(phy->pipe_clk, 125000000); return 0; }
static int sdhci_msm_probe(struct platform_device *pdev) { struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_msm_host *msm_host; struct resource *core_memres; int ret; u16 host_version, core_minor; u32 core_version, caps; u8 core_major; host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host)); if (IS_ERR(host)) return PTR_ERR(host); pltfm_host = sdhci_priv(host); msm_host = sdhci_pltfm_priv(pltfm_host); msm_host->mmc = host->mmc; msm_host->pdev = pdev; ret = mmc_of_parse(host->mmc); if (ret) goto pltfm_free; sdhci_get_of_property(pdev); /* Setup SDCC bus voter clock. */ msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus"); if (!IS_ERR(msm_host->bus_clk)) { /* Vote for max. clk rate for max. performance */ ret = clk_set_rate(msm_host->bus_clk, INT_MAX); if (ret) goto pltfm_free; ret = clk_prepare_enable(msm_host->bus_clk); if (ret) goto pltfm_free; } /* Setup main peripheral bus clock */ msm_host->pclk = devm_clk_get(&pdev->dev, "iface"); if (IS_ERR(msm_host->pclk)) { ret = PTR_ERR(msm_host->pclk); dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret); goto bus_clk_disable; } ret = clk_prepare_enable(msm_host->pclk); if (ret) goto bus_clk_disable; /* Setup SDC MMC clock */ msm_host->clk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(msm_host->clk)) { ret = PTR_ERR(msm_host->clk); dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret); goto pclk_disable; } /* Vote for maximum clock rate for maximum performance */ ret = clk_set_rate(msm_host->clk, INT_MAX); if (ret) dev_warn(&pdev->dev, "core clock boost failed\n"); ret = clk_prepare_enable(msm_host->clk); if (ret) goto pclk_disable; core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1); msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres); if (IS_ERR(msm_host->core_mem)) { dev_err(&pdev->dev, "Failed to remap registers\n"); ret = PTR_ERR(msm_host->core_mem); goto clk_disable; } /* Reset the core and Enable SDHC mode */ 
writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) | CORE_SW_RST, msm_host->core_mem + CORE_POWER); /* SW reset can take upto 10HCLK + 15MCLK cycles. (min 40us) */ usleep_range(1000, 5000); if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) { dev_err(&pdev->dev, "Stuck in reset\n"); ret = -ETIMEDOUT; goto clk_disable; } /* Set HC_MODE_EN bit in HC_MODE register */ writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE)); host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n", host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT)); core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION); core_major = (core_version & CORE_VERSION_MAJOR_MASK) >> CORE_VERSION_MAJOR_SHIFT; core_minor = core_version & CORE_VERSION_MINOR_MASK; dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n", core_version, core_major, core_minor); /* * Support for some capabilities is not advertised by newer * controller versions and must be explicitly enabled. 
*/ if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) { caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES); caps |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT; writel_relaxed(caps, host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0); } /* Setup IRQ for handling power/voltage tasks with PMIC */ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); if (msm_host->pwr_irq < 0) { dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n", msm_host->pwr_irq); goto clk_disable; } ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, sdhci_msm_pwr_irq, IRQF_ONESHOT, dev_name(&pdev->dev), host); if (ret) { dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret); goto clk_disable; } ret = sdhci_add_host(host); if (ret) goto clk_disable; return 0; clk_disable: clk_disable_unprepare(msm_host->clk); pclk_disable: clk_disable_unprepare(msm_host->pclk); bus_clk_disable: if (!IS_ERR(msm_host->bus_clk)) clk_disable_unprepare(msm_host->bus_clk); pltfm_free: sdhci_pltfm_free(pdev); return ret; }
/* Store the secondary-CPU entry point (physical address) in its SRC_GPR slot. */
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
	int hwcpu = cpu_logical_map(cpu);

	/* Each CPU owns a pair of GPRs; the jump address lives in the first. */
	writel_relaxed(virt_to_phys(jump_addr),
		       src_base + SRC_GPR1 + hwcpu * 8);
}
/*
 * QUP I2C interrupt handler.  Classifies the interrupt (spurious, I2C
 * bus error, QUP core error, or normal in/out service), acknowledges
 * the relevant flags and completes the in-flight transfer.  When the
 * controller has a single combined IRQ line (dev->num_irqs == 1) error
 * interrupts are level triggered and must be cleared by forcing the
 * QUP back to the reset state.
 */
static irqreturn_t qup_i2c_interrupt(int irq, void *devid)
{
	struct qup_i2c_dev *dev = devid;
	uint32_t status = readl_relaxed(dev->base + QUP_I2C_STATUS);
	uint32_t status1 = readl_relaxed(dev->base + QUP_ERROR_FLAGS);
	uint32_t op_flgs = readl_relaxed(dev->base + QUP_OPERATIONAL);
	int err = 0;

	/* Spurious interrupt: no transfer is in flight. */
	if (!dev->msg || !dev->complete) {
		/* Clear Error interrupt if it's a level triggered interrupt*/
		if (dev->num_irqs == 1) {
			writel_relaxed(QUP_RESET_STATE, dev->base+QUP_STATE);
			/* Ensure that state is written before ISR exits */
			mb();
		}
		return IRQ_HANDLED;
	}

	if (status & I2C_STATUS_ERROR_MASK) {
		dev_err(dev->dev, "QUP: I2C status flags :0x%x, irq:%d\n",
			status, irq);
		err = status;
		/* Clear Error interrupt if it's a level triggered interrupt*/
		if (dev->num_irqs == 1) {
			writel_relaxed(QUP_RESET_STATE, dev->base+QUP_STATE);
			/* Ensure that state is written before ISR exits */
			mb();
		}
		goto intr_done;
	}

	if (status1 & 0x7F) {
		dev_err(dev->dev, "QUP: QUP status flags :0x%x\n", status1);
		err = -status1;
		/* Clear Error interrupt if it's a level triggered interrupt*/
		if (dev->num_irqs == 1) {
			writel_relaxed((status1 & QUP_STATUS_ERROR_FLAGS),
				dev->base + QUP_ERROR_FLAGS);
			/* Ensure that error flags are cleared before ISR
			 * exits
			 */
			mb();
		}
		goto intr_done;
	}

	/* With separate IRQ lines, output IRQs during a read are ignored. */
	if ((dev->num_irqs == 3) && (dev->msg->flags == I2C_M_RD)
		&& (irq == dev->out_irq))
		return IRQ_HANDLED;

	if (op_flgs & QUP_OUT_SVC_FLAG) {
		writel_relaxed(QUP_OUT_SVC_FLAG, dev->base + QUP_OPERATIONAL);
		/* Ensure that service flag is acknowledged before ISR exits */
		mb();
	}
	if (dev->msg->flags == I2C_M_RD) {
		if ((op_flgs & QUP_MX_INPUT_DONE) ||
			(op_flgs & QUP_IN_SVC_FLAG)) {
			writel_relaxed(QUP_IN_SVC_FLAG, dev->base
					+ QUP_OPERATIONAL);
			/* Ensure that service flag is acknowledged before ISR
			 * exits
			 */
			mb();
		} else
			return IRQ_HANDLED;
	}

intr_done:
	dev_dbg(dev->dev, "QUP intr= %d, i2c status=0x%x, qup status = 0x%x\n",
		irq, status, status1);
	qup_print_status(dev);
	dev->err = err;
	complete(dev->complete);
	return IRQ_HANDLED;
}
/*
 * Register an MPS2 timer as a periodic/oneshot clockevent device.
 * The tick rate comes from the optional "clock-frequency" DT property,
 * or else from the node's first clock.  Returns 0 on success or a
 * negative errno, with all acquired resources released.
 */
static int __init mps2_clockevent_init(struct device_node *np)
{
	void __iomem *base;
	struct clk *clk = NULL;
	struct clockevent_mps2 *ce;
	u32 rate;
	int irq, ret;
	const char *name = "mps2-clkevt";

	/* Prefer an explicit "clock-frequency" property over a clk lookup. */
	ret = of_property_read_u32(np, "clock-frequency", &rate);
	if (ret) {
		clk = of_clk_get(np, 0);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			pr_err("failed to get clock for clockevent: %d\n", ret);
			goto out;
		}

		ret = clk_prepare_enable(clk);
		if (ret) {
			pr_err("failed to enable clock for clockevent: %d\n",
			       ret);
			goto out_clk_put;
		}

		rate = clk_get_rate(clk);
	}

	base = of_iomap(np, 0);
	if (!base) {
		ret = -EADDRNOTAVAIL;
		pr_err("failed to map register for clockevent: %d\n", ret);
		goto out_clk_disable;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		ret = -ENOENT;
		pr_err("failed to get irq for clockevent: %d\n", ret);
		goto out_iounmap;
	}

	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
	if (!ce) {
		ret = -ENOMEM;
		goto out_iounmap;
	}

	ce->reg = base;
	ce->clock_count_per_tick = DIV_ROUND_CLOSEST(rate, HZ);
	ce->clkevt.irq = irq;
	ce->clkevt.name = name;
	ce->clkevt.rating = 200;
	ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	ce->clkevt.cpumask = cpu_possible_mask;
	/*
	 * NOTE(review): the commas below are comma operators, not typos in
	 * effect - each assignment still executes; semantically identical
	 * to semicolon-separated statements.
	 */
	ce->clkevt.set_state_shutdown	= mps2_timer_shutdown,
	ce->clkevt.set_state_periodic	= mps2_timer_set_periodic,
	ce->clkevt.set_state_oneshot	= mps2_timer_shutdown,
	ce->clkevt.set_next_event	= mps2_timer_set_next_event;

	/* Ensure timer is disabled */
	writel_relaxed(0, base + TIMER_CTRL);

	ret = request_irq(irq, mps2_timer_interrupt, IRQF_TIMER, name, ce);
	if (ret) {
		pr_err("failed to request irq for clockevent: %d\n", ret);
		goto out_kfree;
	}

	clockevents_config_and_register(&ce->clkevt, rate, 0xf, 0xffffffff);

	return 0;

out_kfree:
	kfree(ce);
out_iounmap:
	iounmap(base);
out_clk_disable:
	/* clk_{disable, unprepare, put}() can handle NULL as a parameter */
	clk_disable_unprepare(clk);
out_clk_put:
	clk_put(clk);
out:
	return ret;
}
/*
 * Stage one chunk of an I2C write into the QUP output FIFO.  Output
 * FIFO entries are 16-bit tags packed two-per-32-bit word, so entries
 * at odd half-word positions are OR-ed into the previous word via
 * @carry_over.  @rem is the number of i2c_msgs left in the transfer
 * (used to pick DATA vs STOP for the final byte); @idx tracks the
 * half-word position within the FIFO and dev->pos/dev->cnt track
 * progress through @msg.
 */
static void
qup_issue_write(struct qup_i2c_dev *dev, struct i2c_msg *msg, int rem,
			int *idx, uint32_t *carry_over)
{
	int entries = dev->cnt;
	int empty_sl = dev->wr_sz - ((*idx) >> 1);
	int i = 0;
	uint32_t val = 0;
	uint32_t last_entry = 0;
	uint16_t addr = msg->addr << 1;

	if (dev->pos == 0) {
		/* First byte of the message: emit the START + address tag. */
		if (*idx % 4) {
			writel_relaxed(*carry_over | ((QUP_OUT_START |
							addr) << 16),
					dev->base + QUP_OUT_FIFO_BASE);

			qup_verify_fifo(dev,
				*carry_over | QUP_OUT_DATA << 16 |
				addr << 16, (uint32_t)dev->base +
				QUP_OUT_FIFO_BASE + (*idx) - 2, 0);
		} else
			val = QUP_OUT_START | addr;
		*idx += 2;
		i++;
		entries++;
	} else {
		/* Avoid setp time issue by adding 1 NOP when number of bytes
		 * are more than FIFO/BLOCK size. setup time issue can't appear
		 * otherwise since next byte to be written will always be ready
		 */
		val = (QUP_OUT_NOP | 1);
		*idx += 2;
		i++;
		entries++;
	}
	if (entries > empty_sl)
		entries = empty_sl;

	for (; i < (entries - 1); i++) {
		if (*idx % 4) {
			writel_relaxed(val | ((QUP_OUT_DATA |
				msg->buf[dev->pos]) << 16),
				dev->base + QUP_OUT_FIFO_BASE);

			qup_verify_fifo(dev, val | QUP_OUT_DATA << 16 |
				msg->buf[dev->pos] << 16, (uint32_t)dev->base +
				QUP_OUT_FIFO_BASE + (*idx) - 2, 0);
		} else
			val = QUP_OUT_DATA | msg->buf[dev->pos];
		(*idx) += 2;
		dev->pos++;
	}

	/* Tag for the last byte staged in this pass: DATA unless this is
	 * the final byte of the final message, which gets STOP.
	 */
	if (dev->pos < (msg->len - 1))
		last_entry = QUP_OUT_DATA;
	else if (rem > 1) /* not last array entry */
		last_entry = QUP_OUT_DATA;
	else
		last_entry = QUP_OUT_STOP;
	if ((*idx % 4) == 0) {
		/*
		 * If read-start and read-command end up in different fifos, it
		 * may result in extra-byte being read due to extra-read cycle.
		 * Avoid that by inserting NOP as the last entry of fifo only
		 * if write command(s) leave 1 space in fifo.
		 */
		if (rem > 1) {
			struct i2c_msg *next = msg + 1;
			/*
			 * Fix: test the read flag with '&'.  The original
			 * '(next->flags | I2C_M_RD)' was always non-zero
			 * (I2C_M_RD != 0), so the NOP-padding path triggered
			 * for write messages as well.
			 */
			if (next->addr == msg->addr && (next->flags & I2C_M_RD)
				&& *idx == ((dev->wr_sz*2) - 4)) {
				writel_relaxed(((last_entry |
					msg->buf[dev->pos]) |
					((1 | QUP_OUT_NOP) << 16)),
					dev->base + QUP_OUT_FIFO_BASE);
				*idx += 2;
			} else
				*carry_over = (last_entry | msg->buf[dev->pos]);
		} else {
			writel_relaxed((last_entry | msg->buf[dev->pos]),
				dev->base + QUP_OUT_FIFO_BASE);

			qup_verify_fifo(dev, last_entry |
			msg->buf[dev->pos], (uint32_t)dev->base +
			QUP_OUT_FIFO_BASE + (*idx), 0);
		}
	} else {
		writel_relaxed(val | ((last_entry | msg->buf[dev->pos]) << 16),
		dev->base + QUP_OUT_FIFO_BASE);

		qup_verify_fifo(dev, val | (last_entry << 16) |
		(msg->buf[dev->pos] << 16), (uint32_t)dev->base +
		QUP_OUT_FIFO_BASE + (*idx) - 2, 0);
	}

	*idx += 2;
	dev->pos++;
	dev->cnt = msg->len - dev->pos;
}
/* Write one timer register of the MPS2 clockevent backing device @c. */
static void clockevent_mps2_writel(u32 val, struct clock_event_device *c,
				   u32 offset)
{
	struct clockevent_mps2 *ce = to_mps2_clkevt(c);

	writel_relaxed(val, ce->reg + offset);
}
/*
 * Initialize the GIC distributor: discover the number of supported
 * interrupt sources, program default trigger/target/priority for all
 * global (SPI) interrupts, disable them, wire PPIs and SPIs into the
 * Linux IRQ subsystem and finally enable the distributor.  PPI/SGI
 * enables are banked per-CPU and are deliberately left untouched here.
 */
static void __init gic_dist_init(struct gic_chip_data *gic,
	unsigned int irq_start)
{
	unsigned int gic_irqs, irq_limit, i;
	u32 cpumask;
	void __iomem *base = gic->dist_base;
	u32 cpu = 0;
	u32 nrppis = 0, ppi_base = 0;

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(smp_processor_id());
#endif

	/* Replicate this CPU's target mask into all four byte lanes. */
	cpumask = 1 << cpu;
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* Distributor must be disabled while it is being configured. */
	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;

	gic->gic_irqs = gic_irqs;

	/*
	 * Nobody would be insane enough to use PPIs on a secondary
	 * GIC, right?
	 */
	if (gic == &gic_data[0]) {
		nrppis = (32 - irq_start) & 31;

		/* The GIC only supports up to 16 PPIs. */
		if (nrppis > 16)
			BUG();

		ppi_base = gic->irq_offset + 32 - nrppis;
	}

	pr_info("Configuring GIC with %d sources (%d PPIs)\n",
		gic_irqs, (gic == &gic_data[0]) ? nrppis : 0);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR
					+ i * 4 / 32);

	/*
	 * Limit number of interrupts registered to the platform maximum
	 */
	irq_limit = gic->irq_offset + gic_irqs;
	if (WARN_ON(irq_limit > NR_IRQS))
		irq_limit = NR_IRQS;

	/*
	 * Setup the Linux IRQ subsystem.
	 */
	for (i = 0; i < nrppis; i++) {
		int ppi = i + ppi_base;

		irq_set_percpu_devid(ppi);
		irq_set_chip_and_handler(ppi, &gic_chip,
					 handle_percpu_devid_irq);
		irq_set_chip_data(ppi, gic);
		set_irq_flags(ppi, IRQF_VALID | IRQF_NOAUTOEN);
	}

	for (i = irq_start + nrppis; i < irq_limit; i++) {
		irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
		irq_set_chip_data(i, gic);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
/* The following operations are needed by XDB */
static inline void ptm_enable_access(void)
{
	/* CoreSight unlock key written to the PTM lock-access register. */
	writel_relaxed(0xC5ACCE55, PTM_LOCK);
}
static inline void spi_enable_chip(struct rockchip_spi *rs, int enable) { writel_relaxed((enable ? 1 : 0), rs->regs + ROCKCHIP_SPI_SSIENR); }