/**
 * omap2_prm_assert_hardreset - assert the HW reset line of a submodule
 * @prm_mod: PRM submodule base (e.g. CORE_MOD)
 * @shift: register bit shift corresponding to the reset line to assert
 *
 * Some IPs like dsp or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  This places the selected submodule into reset by setting its
 * bit in the module's RM_RSTCTRL register.  Only valid on
 * OMAP24xx/34xx; returns 0 upon success or -EINVAL upon an argument
 * error.
 */
int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift)
{
	u32 rst_bit;

	if (!cpu_is_omap24xx() && !cpu_is_omap34xx())
		return -EINVAL;

	rst_bit = 1 << shift;
	omap2_prm_rmw_mod_reg_bits(rst_bit, rst_bit, prm_mod,
				   OMAP2_RM_RSTCTRL);

	return 0;
}
/*
 * omap_prcm_get_reset_sources - return the source(s) of the last reset
 *
 * Reads the low seven bits of the wakeup module's RM_RSTST register
 * for the running SoC.  Returns 0 on SoCs this code does not know
 * about.
 */
u32 omap_prcm_get_reset_sources(void)
{
	/* XXX This presumably needs modification for 34XX */
	if (cpu_is_omap24xx())
		return omap2_prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTST) & 0x7f;
	if (cpu_is_omap34xx())
		/*
		 * Was a bare 0xA00 module offset; every other branch uses
		 * the symbolic WKUP_MOD (as upstream does), so use it here
		 * too instead of a magic number.
		 */
		return omap2_prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTST) & 0x7f;
	if (cpu_is_omap44xx())
		return omap2_prm_read_mod_reg(WKUP_MOD, OMAP4_RM_RSTST) & 0x7f;

	return 0;
}
/*
 * omap2_init_common_devices - initialize common OMAP2+ on-chip devices
 * @sdrc_cs0: SDRC timing parameters for chip-select 0
 * @sdrc_cs1: SDRC timing parameters for chip-select 1
 *
 * Ordering note: serial early init and hwmod late init run before the
 * SDRAM controller is (re)programmed; omap_irq_base_init() runs last.
 */
void __init omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0,
				      struct omap_sdrc_params *sdrc_cs1)
{
	omap_serial_early_init();
	omap_hwmod_late_init();

	/* SDRC is only (re)programmed on 24xx/34xx */
	if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
		omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
		_omap2_init_reprogram_sdrc();
	}

	omap_irq_base_init();
}
/** * _omap2xxx_clk_commit - commit clock parent/rate changes in hardware * @clk: struct clk * * * If @clk has the DELAYED_APP flag set, meaning that parent/rate changes * don't take effect until the VALID_CONFIG bit is written, write the * VALID_CONFIG bit and wait for the write to complete. No return value. */ static void _omap2xxx_clk_commit(struct clk *clk) { if (!cpu_is_omap24xx()) return; if (!(clk->flags & DELAYED_APP)) return; prm_write_mod_reg(OMAP24XX_VALID_CONFIG, OMAP24XX_GR_MOD, OMAP2_PRCM_CLKCFG_CTRL_OFFSET); /* OCP barrier */ prm_read_mod_reg(OMAP24XX_GR_MOD, OMAP2_PRCM_CLKCFG_CTRL_OFFSET); }
/* Number of I2C ports on the running SoC (0 if unknown). */
static int __init omap_i2c_nr_ports(void)
{
	if (cpu_class_is_omap1())
		return 1;
	if (cpu_is_omap24xx())
		return 2;
	if (cpu_is_omap34xx())
		return 3;

	return 0;
}
/* Register the PMU device with the SoC-appropriate resource, if any. */
static void omap_init_pmu(void)
{
	struct resource *res;

	if (cpu_is_omap24xx())
		res = &omap2_pmu_resource;
	else if (cpu_is_omap34xx())
		res = &omap3_pmu_resource;
	else if (cpu_is_omap44xx())
		res = &omap4_pmu_resource;
	else
		return;		/* no PMU description for this SoC */

	omap_pmu_device.resource = res;
	platform_device_register(&omap_pmu_device);
}
/*
 * Switch the MPU rate if specified on cmdline.  We cannot do this
 * early until cmdline is parsed.  XXX This should be removed from the
 * clock code and handled by the OPP layer code in the near future.
 */
static int __init omap2xxx_clk_arch_init(void)
{
	int err;

	if (!cpu_is_omap24xx())
		return 0;

	err = omap2_clk_switch_mpurate_at_boot("virt_prcm_set");
	if (err == 0)
		omap2_clk_print_new_rates("sys_ck", "dpll_ck", "mpu_ck");

	return err;
}
/*
 * dss_save_context - save DSS register context via the SR() macro
 *
 * Saves SYSCONFIG and CONTROL, plus the SDI/PLL control registers
 * when SDI support is compiled in.  Skipped entirely on 24xx --
 * presumably no context save is needed there; confirm against the
 * matching restore path.
 */
void dss_save_context(void)
{
	if (cpu_is_omap24xx())
		return;

	SR(SYSCONFIG);
	SR(CONTROL);

#ifdef CONFIG_OMAP2_DSS_SDI
	SR(SDI_CONTROL);
	SR(PLL_CONTROL);
#endif
}
void omap_start_dma(int lch) { u32 l; if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { int next_lch, cur_lch; char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; dma_chan_link_map[lch] = 1; /* Set the link register of the first channel */ enable_lnk(lch); memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map)); cur_lch = dma_chan[lch].next_lch; do { next_lch = dma_chan[cur_lch].next_lch; /* The loop case: we've been here already */ if (dma_chan_link_map[cur_lch]) break; /* Mark the current channel */ dma_chan_link_map[cur_lch] = 1; enable_lnk(cur_lch); omap_enable_channel_irq(cur_lch); cur_lch = next_lch; } while (next_lch != -1); } else if (cpu_is_omap242x() || (cpu_is_omap243x() && omap_type() <= OMAP2430_REV_ES1_0)) { /* Errata: Need to write lch even if not using chaining */ dma_write(lch, CLNK_CTRL(lch)); } omap_enable_channel_irq(lch); l = dma_read(CCR(lch)); /* * Errata: On ES2.0 BUFFERING disable must be set. * This will always fail on ES1.0 */ if (cpu_is_omap24xx()) l |= OMAP_DMA_CCR_EN; l |= OMAP_DMA_CCR_EN; dma_write(l, CCR(lch)); dma_chan[lch].flags |= OMAP_DMA_ACTIVE; }
/* Given a new render window in new_win, adjust the window to the * nearest supported configuration. The image cropping window in crop * will also be adjusted if necessary. Preference is given to keeping the * the window as close to the requested configuration as possible. If * successful, new_win, vout->win, and crop are updated. * Returns zero if succesful, or -EINVAL if the requested preview window is * impossible and cannot reasonably be adjusted. */ int omap_vout_new_window(struct v4l2_rect *crop, struct v4l2_window *win, struct v4l2_framebuffer *fbuf, struct v4l2_window *new_win) { int err; err = omap_vout_try_window(fbuf, new_win); if (err) return err; /* update our preview window */ win->w = new_win->w; win->field = new_win->field; win->chromakey = new_win->chromakey; if (cpu_is_omap24xx() || !machine_has_isp()) { /* adjust the cropping window to allow for resizing * limitations. 24xx allow 8x to 1/2x scaling. */ if ((crop->height/win->w.height) >= 2) { /* The maximum vertical downsizing ratio is 2:1 */ crop->height = win->w.height * 2; } if ((crop->width/win->w.width) >= 2) { /* The maximum horizontal downsizing ratio is 2:1 */ crop->width = win->w.width * 2; } if (crop->width > 768) { /* The OMAP2420 vertical resizing line buffer is 768 * pixels wide. If the cropped image is wider than * 768 pixels then it cannot be vertically resized. */ if (crop->height != win->w.height) crop->width = 768; } } else { /* adjust the cropping window to allow for resizing * limitations 34xx allow 8x to 1/8x scaling. */ if ((crop->height/win->w.height) >= 8) { /* The maximum vertical downsizing ratio is 8:1 */ crop->height = win->w.height * 8; } if ((crop->width/win->w.width) >= 8) { /* The maximum horizontal downsizing ratio is 8:1 */ crop->width = win->w.width * 8; } } return 0; }
/*
 * Reference-counted EMIFS write-protect toggle; nothing to do on
 * OMAP2/3 parts.
 */
static void omap_set_vpp(struct map_info *map, int enable)
{
	static int count;

	if (cpu_is_omap24xx() || cpu_is_omap34xx())
		return;

	if (enable) {
		/* first enabler sets the WP bit */
		if (count++ == 0)
			OMAP_EMIFS_CONFIG_REG |= OMAP_EMIFS_CONFIG_WP;
	} else {
		/* last disabler clears it; never underflow the count */
		if (count && (--count == 0))
			OMAP_EMIFS_CONFIG_REG &= ~OMAP_EMIFS_CONFIG_WP;
	}
}
/* Register the SHA/MD5 accelerator with SoC-specific resources. */
static void omap_init_sham(void)
{
	if (cpu_is_omap24xx()) {
		sham_device.resource = omap2_sham_resources;
		sham_device.num_resources = omap2_sham_resources_sz;
		platform_device_register(&sham_device);
		return;
	}

	if (cpu_is_omap34xx() && !cpu_is_am33xx()) {
		sham_device.resource = omap3_sham_resources;
		sham_device.num_resources = omap3_sham_resources_sz;
		platform_device_register(&sham_device);
		return;
	}

	pr_err("%s: platform not supported\n", __func__);
}
/*
 * Hook the per-SoC pin tables and cfg_reg handler into arch_mux_cfg,
 * then hand it to the mux core.  SoCs other than 24xx/34xx register
 * an unmodified arch_mux_cfg.
 */
int __init omap2_mux_init(void)
{
	if (cpu_is_omap24xx()) {
		arch_mux_cfg.cfg_reg = omap24xx_cfg_reg;
		arch_mux_cfg.pins = omap24xx_pins;
		arch_mux_cfg.size = OMAP24XX_PINS_SZ;
	} else if (cpu_is_omap34xx()) {
		arch_mux_cfg.cfg_reg = omap34xx_cfg_reg;
		arch_mux_cfg.pins = omap34xx_pins;
		arch_mux_cfg.size = OMAP34XX_PINS_SZ;
	}

	return omap_mux_register(&arch_mux_cfg);
}
/* Resets clock rates and reboots the system.  @mode is unused here. */
void omap_prcm_restart(char mode, const char *cmd)
{
	s16 prcm_offs = 0;

	if (cpu_is_omap24xx()) {
		omap2xxx_clk_prepare_for_reboot();
		prcm_offs = WKUP_MOD;
	} else if (cpu_is_omap34xx()) {
		prcm_offs = OMAP3430_GR_MOD;
		/* record the first byte of @cmd (if any) as the boot mode */
		omap3_ctrl_write_boot_mode((cmd ? (u8)*cmd : 0));
	} else if (cpu_is_omap44xx()) {
		omap4_prminst_global_warm_sw_reset(); /* never returns */
	} else {
		WARN_ON(1);
	}

	/* XXX should be moved to some OMAP2/3 specific code */
	/* assert the DPLL3 reset to trigger the actual reboot */
	omap2_prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, prcm_offs,
				   OMAP2_RM_RSTCTRL);
	omap2_prm_read_mod_reg(prcm_offs, OMAP2_RM_RSTCTRL); /* OCP barrier */
}
/* Register the 32 KiHz clocksource on SoCs that provide it. */
static int __init omap_init_clocksource_32k(void)
{
	static char err[] __initdata =
		KERN_ERR "%s: can't register clocksource!\n";

	if (!cpu_is_omap16xx() && !cpu_is_omap24xx())
		return 0;

	clocksource_32k.mult = clocksource_hz2mult(32768,
						   clocksource_32k.shift);
	if (clocksource_register(&clocksource_32k))
		printk(err, clocksource_32k.name);

	return 0;
}
/* Register the AES accelerator with SoC-specific resources. */
static void omap_init_aes(void)
{
	if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
		pr_err("%s: platform not supported\n", __func__);
		return;
	}

	if (cpu_is_omap24xx()) {
		aes_device.resource = omap2_aes_resources;
		aes_device.num_resources = omap2_aes_resources_sz;
	} else {
		aes_device.resource = omap3_aes_resources;
		aes_device.num_resources = omap3_aes_resources_sz;
	}

	platform_device_register(&aes_device);
}
/* Register the mailbox device with SoC-specific resources. */
static inline void omap_init_mbox(void)
{
	if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
		pr_err("%s: platform not supported\n", __func__);
		return;
	}

	if (cpu_is_omap24xx()) {
		mbox_device.resource = omap2_mbox_resources;
		mbox_device.num_resources = omap2_mbox_resources_sz;
	} else {
		mbox_device.resource = omap3_mbox_resources;
		mbox_device.num_resources = omap3_mbox_resources_sz;
	}

	platform_device_register(&mbox_device);
}
/*
 * Resets clock rates and reboots the system.  Only called from system.h.
 * @mode is unused; the first byte of @cmd (if non-NULL) is recorded as
 * the 34xx boot mode.
 */
void omap_prcm_restart(char mode, const char *cmd)
{
	s16 prcm_offs = 0;

	if (cpu_is_omap24xx()) {
		omap2xxx_clk_prepare_for_reboot();
		prcm_offs = WKUP_MOD;
	} else if (cpu_is_omap34xx()) {
		prcm_offs = OMAP3430_GR_MOD;
		omap3_ctrl_write_boot_mode((cmd ? (u8)*cmd : 0));
	} else if (cpu_is_omap44xx()) {
		omap4_prminst_global_warm_sw_reset(); /* never returns */
	} else {
		WARN_ON(1);
	}

	/*
	 * As per Errata i520, in some cases, user will not be able to
	 * access DDR memory after warm-reset.
	 * This situation occurs while the warm-reset happens during a read
	 * access to DDR memory. In that particular condition, DDR memory
	 * does not respond to a corrupted read command due to the warm
	 * reset occurrence but SDRC is waiting for read completion.
	 * SDRC is not sensitive to the warm reset, but the interconnect is
	 * reset on the fly, thus causing a misalignment between SDRC logic,
	 * interconnect logic and DDR memory state.
	 * WORKAROUND:
	 * Steps to perform before a Warm reset is trigged:
	 * 1. enable self-refresh on idle request
	 * 2. put SDRC in idle
	 * 3. wait until SDRC goes to idle
	 * 4. generate SW reset (Global SW reset)
	 *
	 * Steps to be performed after warm reset occurs (in bootloader):
	 * if HW warm reset is the source, apply below steps before any
	 * accesses to SDRAM:
	 * 1. Reset SMS and SDRC and wait till reset is complete
	 * 2. Re-initialize SMS, SDRC and memory
	 *
	 * NOTE: Above work around is required only if arch reset is
	 * implemented using Global SW reset(GLOBAL_SW_RST). DPLL3 reset
	 * does not need the WA since it resets SDRC as well as part of
	 * cold reset.
	 */

	/* XXX should be moved to some OMAP2/3 specific code */
	omap2_prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, prcm_offs,
				   OMAP2_RM_RSTCTRL);
	omap2_prm_read_mod_reg(prcm_offs, OMAP2_RM_RSTCTRL); /* OCP barrier */
}
/*
 * omap_rng_probe - set up the OMAP hardware RNG
 * @pdev: RNG platform device
 *
 * Grabs (and on 24xx enables) the interface clock, maps the MMIO
 * region, registers with the hwrng core, and unmasks the RNG via
 * RNG_MASK_REG.  The mem region is stashed in drvdata for remove().
 * Returns 0 on success or a negative errno.  On any failure the 24xx
 * interface clock is disabled and released again (the original code
 * leaked an enabled rng_ick on every post-clk_get error path).
 */
static int __init omap_rng_probe(struct platform_device *pdev)
{
	struct resource *res, *mem;
	int ret;

	/*
	 * A bit ugly, and it will never actually happen but there can
	 * be only one RNG and this catches any bork
	 */
	BUG_ON(rng_dev);

	if (cpu_is_omap24xx()) {
		rng_ick = clk_get(NULL, "rng_ick");
		if (IS_ERR(rng_ick)) {
			dev_err(&pdev->dev, "Could not get rng_ick\n");
			return PTR_ERR(rng_ick);
		}
		clk_enable(rng_ick);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENOENT;
		goto err_clk;
	}

	mem = request_mem_region(res->start, res->end - res->start + 1,
				 pdev->name);
	if (mem == NULL) {
		ret = -EBUSY;
		goto err_clk;
	}

	dev_set_drvdata(&pdev->dev, mem);
	rng_base = (u32 __iomem *)io_p2v(res->start);

	ret = hwrng_register(&omap_rng_ops);
	if (ret) {
		release_resource(mem);
		rng_base = NULL;
		goto err_clk;
	}

	dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
		 omap_rng_read_reg(RNG_REV_REG));
	omap_rng_write_reg(RNG_MASK_REG, 0x1);

	rng_dev = pdev;

	return 0;

err_clk:
	/* undo the 24xx clock setup done above */
	if (cpu_is_omap24xx()) {
		clk_disable(rng_ick);
		clk_put(rng_ick);
	}
	return ret;
}
/*
 * omap2_clk_set_parent - switch @clk to source from @new_parent
 * @clk: clock to reparent (must be a CLKSEL clock)
 * @new_parent: new parent clock
 *
 * Returns 0 on success, or -EINVAL if @clk is a CONFIG_PARTICIPANT
 * clock, has no clksel data, or @new_parent is not a valid source for
 * it.  If @clk is in use it is disabled across the mux write and
 * re-enabled afterwards.
 */
int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
{
	void __iomem *src_addr;
	u32 field_val, field_mask, reg_val, parent_div;

	if (unlikely(clk->flags & CONFIG_PARTICIPANT))
		return -EINVAL;

	if (!clk->clksel)
		return -EINVAL;

	/* resolve register address, field mask/value and parent divisor */
	field_val = omap2_clksel_get_src_field(&src_addr, new_parent,
					       &field_mask, clk, &parent_div);
	if (src_addr == 0)
		return -EINVAL;

	if (clk->usecount > 0)
		_omap2_clk_disable(clk);

	/* Set new source value (previous dividers if any in effect) */
	reg_val = __raw_readl(src_addr) & ~field_mask;
	reg_val |= (field_val << __ffs(field_mask));
	__raw_writel(reg_val, src_addr);
	wmb();

	/* 24xx DELAYED_APP clocks only take effect after VALID_CONFIG */
	if (clk->flags & DELAYED_APP && cpu_is_omap24xx()) {
		__raw_writel(OMAP24XX_VALID_CONFIG,
			     OMAP24XX_PRCM_CLKCFG_CTRL);
		wmb();
	}

	if (clk->usecount > 0)
		_omap2_clk_enable(clk);

	clk->parent = new_parent;

	/* CLKSEL clocks follow their parents' rates, divided by a divisor */
	clk->rate = new_parent->rate;

	if (parent_div > 0)
		clk->rate /= parent_div;

	pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
		 clk->name, clk->parent->name, clk->rate);

	if (unlikely(clk->flags & RATE_PROPAGATES))
		propagate_rate(clk);

	return 0;
}
/*
 * Initialize omap_irq_base for entry-macro.S from the running SoC
 * type.  This is a no-op unless MULTI_OMAP2 is defined.
 */
static inline void omap_irq_base_init(void)
{
	extern void __iomem *omap_irq_base;

#ifdef MULTI_OMAP2
	if (cpu_is_omap24xx())
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE);
	else if (cpu_is_omap34xx() || cpu_is_ti81xx())
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE);
	else if (cpu_is_omap44xx())
		/* OMAP4 uses the GIC CPU interface, not the legacy INTC */
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE);
	else
		pr_err("Could not initialize omap_irq_base\n");
#endif
}
/*
 * omap2_init_common_hw - low-level init common to OMAP2/3/4
 * @sdrc_cs0: SDRC timing parameters for chip-select 0
 * @sdrc_cs1: SDRC timing parameters for chip-select 1
 *
 * Ordering is significant: power/clock domains, hwmods and pin mux
 * come first, OPP tables must precede clock init, and SDRC/GPMC are
 * brought up last.
 */
void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
				 struct omap_sdrc_params *sdrc_cs1)
{
	/* power/clock domains must exist before hwmod init */
	pwrdm_init(powerdomains_omap);
	clkdm_init(clockdomains_omap, clkdm_autodeps);
	if (cpu_is_omap242x())
		omap2420_hwmod_init();
	else if (cpu_is_omap243x())
		omap2430_hwmod_init();
	else if (cpu_is_omap34xx())
		omap3xxx_hwmod_init();
	omap2_mux_init();
	/* The OPP tables have to be registered before a clk init */
	omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps);

	if (cpu_is_omap2420())
		omap2420_clk_init();
	else if (cpu_is_omap2430())
		omap2430_clk_init();
	else if (cpu_is_omap34xx())
		omap3xxx_clk_init();
	else if (cpu_is_omap44xx())
		omap4xxx_clk_init();
	else
		pr_err("Could not init clock framework - unknown CPU\n");

	omap_serial_early_init();
	if (cpu_is_omap24xx() || cpu_is_omap34xx())	/* FIXME: OMAP4 */
		omap_hwmod_late_init();
	omap_pm_if_init();
	if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
		omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
		_omap2_init_reprogram_sdrc();
	}
	gpmc_init();
}
/*
 * omap_rng_remove - tear down the OMAP hardware RNG
 * @pdev: RNG platform device
 *
 * Mirrors probe: unregister from the hwrng core, clear RNG_MASK_REG
 * (set to 0x1 at probe), release the 24xx interface clock if one was
 * taken, and release the MMIO region stashed in drvdata by probe.
 * The original variant never released that region, leaking it across
 * a remove/probe cycle.
 */
static int __exit omap_rng_remove(struct platform_device *pdev)
{
	struct resource *mem = dev_get_drvdata(&pdev->dev);

	hwrng_unregister(&omap_rng_ops);

	omap_rng_write_reg(RNG_MASK_REG, 0x0);

	if (cpu_is_omap24xx()) {
		clk_disable(rng_ick);
		clk_put(rng_ick);
	}

	release_resource(mem);
	rng_base = NULL;

	return 0;
}
/* Register the PMU device with the SoC-appropriate resource, if any. */
static void omap_init_pmu(void)
{
	if (cpu_is_omap24xx())
		omap_pmu_device.resource = &omap2_pmu_resource;
	else if (cpu_is_omap34xx())
		omap_pmu_device.resource = &omap3_pmu_resource;
#ifdef CONFIG_PMU_DEP_CTI
	/*
	 * NOTE(review): omap4_pmu_resource is assigned without '&',
	 * unlike the omap2/3 cases -- presumably it is an array; verify
	 * against its definition.
	 */
	else if (cpu_is_omap44xx())
		omap_pmu_device.resource = omap4_pmu_resource;
#endif
	else
		return;		/* no PMU description for this SoC */
	platform_device_register(&omap_pmu_device);
}
/*
 * Register the PMU device.  OMAP2/3 need only a single resource;
 * OMAP4 additionally supplies platform data and routes the PMU IRQ
 * through the CTI before registration.
 */
static void omap_init_pmu(void)
{
	if (cpu_is_omap24xx())
		omap_pmu_device.resource = &omap2_pmu_resource;
	else if (cpu_is_omap34xx())
		omap_pmu_device.resource = &omap3_pmu_resource;
	else if (cpu_is_omap44xx()) {
		/* omap4_pmu_resource is an array of 2 resources here */
		omap_pmu_device.resource = omap4_pmu_resource;
		omap_pmu_device.num_resources = 2;
		omap_pmu_device.dev.platform_data = &omap4_pmu_data;
		omap4_configure_pmu_irq();
	} else
		return;		/* no PMU description for this SoC */
	platform_device_register(&omap_pmu_device);
}
/*
 * omap_kp_remove - tear down the OMAP keypad driver
 *
 * Stops interrupt/tasklet processing, releases the per-SoC IRQ or
 * GPIO resources, then unregisters the input device and sysfs files.
 */
static int __devexit omap_kp_remove(struct platform_device *pdev)
{
	struct omap_kp *omap_kp = platform_get_drvdata(pdev);

	/* disable keypad interrupt handling */
	tasklet_disable(&kp_tasklet);

	if (!cpu_is_omap24xx()) {
		omap_kp->irq = platform_get_irq(pdev, 0);
		if (!cpu_is_omap44xx()) {
			/* mask the MPUIO keyboard interrupt before freeing */
			omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
			free_irq(omap_kp->irq, 0);
		} else {
			/*
			 * NOTE(review): hard-coded IRQ 152 on 44xx, while
			 * the other path uses platform_get_irq() -- confirm
			 * this matches the IRQ requested at probe time.
			 */
			free_irq(152, omap_kp);
		}
	} else {
		/* 24xx: GPIO-matrix keypad; release row/col GPIOs */
		int i;
		for (i = 0; i < omap_kp->cols; i++)
			gpio_free(col_gpios[i]);
		for (i = 0; i < omap_kp->rows; i++) {
			gpio_free(row_gpios[i]);
			free_irq(gpio_to_irq(row_gpios[i]), 0);
		}
	}

	del_timer_sync(&omap_kp->timer);
	tasklet_kill(&kp_tasklet);

	if (cpu_is_omap44xx()) {
		clk_disable(omap_kp->cclk);
		clk_put(omap_kp->cclk);
	}

	/* unregister everything */
	input_unregister_device(omap_kp->input);

	device_remove_file(&pdev->dev, &dev_attr_enable);
#ifdef FACTORY_AT_COMMAND_GKPD
	device_remove_file(&pdev->dev, &dev_attr_key_test_mode);
	wake_lock_destroy(&key_wake_lock);
#endif

	kfree(omap_kp);

	return 0;
}
/* Resets clock rates and reboots the system. Only called from system.h */ void omap_prcm_arch_reset(char mode) { #ifdef CONFIG_OMAP3_PM omap_prcm_arch_pm_reset(mode); #else s16 prcm_offs; if (cpu_is_omap24xx()) prcm_offs = WKUP_MOD; else if (cpu_is_omap34xx()) prcm_offs = OMAP3430_GR_MOD; else WARN_ON(1); prm_set_mod_reg_bits(OMAP_RST_DPLL3, prcm_offs, RM_RSTCTRL); #endif }
/*
 * Note: We don't need special code here for INVERT_ENABLE
 * for the time being since INVERT_ENABLE only applies to clocks enabled by
 * CM_CLKEN_PLL
 */
/*
 * Wait for @clk to become ready, but only when both its functional
 * and interface clocks are enabled.  The sibling F/I/IDLEST register
 * addresses are derived from clk->enable_reg by rewriting the low
 * offset nibbles (0x10 = ICLKEN, 0x00 = FCLKEN, 0x20 = IDLEST).
 */
static void omap2_clk_wait_ready(struct clk *clk)
{
	void __iomem *reg, *other_reg, *st_reg;
	u32 bit;

	/*
	 * REVISIT: This code is pretty ugly.  It would be nice to generalize
	 * it and pull it into struct clk itself somehow.
	 */
	reg = clk->enable_reg;

	/* if @reg is a CM_FCLKEN*, derive the matching CM_ICLKEN*, and
	 * vice versa; any other register means nothing to wait on */
	if ((((u32)reg & 0xff) >= CM_FCLKEN1) &&
	    (((u32)reg & 0xff) <= OMAP24XX_CM_FCLKEN2))
		other_reg = (void __iomem *)(((u32)reg & ~0xf0) | 0x10); /* CM_ICLKEN* */
	else if ((((u32)reg & 0xff) >= CM_ICLKEN1) &&
		 (((u32)reg & 0xff) <= OMAP24XX_CM_ICLKEN4))
		other_reg = (void __iomem *)(((u32)reg & ~0xf0) | 0x00); /* CM_FCLKEN* */
	else
		return;

	/* REVISIT: What are the appropriate exclusions for 34XX? */
	/* No check for DSS or cam clocks */
	if (cpu_is_omap24xx() && ((u32)reg & 0x0f) == 0) { /* CM_{F,I}CLKEN1 */
		if (clk->enable_bit == OMAP24XX_EN_DSS2_SHIFT ||
		    clk->enable_bit == OMAP24XX_EN_DSS1_SHIFT ||
		    clk->enable_bit == OMAP24XX_EN_CAM_SHIFT)
			return;
	}

	/* REVISIT: What are the appropriate exclusions for 34XX? */
	/* OMAP3: ignore DSS-mod clocks */
	if (cpu_is_omap34xx() &&
	    (((u32)reg & ~0xff) == (u32)OMAP_CM_REGADDR(OMAP3430_DSS_MOD, 0) ||
	     ((((u32)reg & ~0xff) == (u32)OMAP_CM_REGADDR(CORE_MOD, 0)) &&
	      clk->enable_bit == OMAP3430_EN_SSI_SHIFT)))
		return;

	/* Check if both functional and interface clocks
	 * are running. */
	bit = 1 << clk->enable_bit;
	if (!(__raw_readl(other_reg) & bit))
		return;

	st_reg = (void __iomem *)(((u32)other_reg & ~0xf0) | 0x20); /* CM_IDLEST* */
	omap2_wait_clock_ready(st_reg, bit, clk->name);
}
/*
 * gpmc_onenand_init - register the GPMC-attached OneNAND device
 * @_onenand_data: board-supplied OneNAND platform data
 *
 * Installs the GPMC setup hook, downgrades SYNC_READWRITE to
 * SYNC_READ on 24xx, and registers the platform device.
 */
void __init gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
{
	gpmc_onenand_data = _onenand_data;
	gpmc_onenand_data->onenand_setup = gpmc_onenand_setup;
	gpmc_onenand_device.dev.platform_data = gpmc_onenand_data;

	if (cpu_is_omap24xx() &&
	    (gpmc_onenand_data->flags & ONENAND_SYNC_READWRITE)) {
		printk(KERN_ERR "Onenand using only SYNC_READ on 24xx\n");
		gpmc_onenand_data->flags &= ~ONENAND_SYNC_READWRITE;
		gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
	}

	if (platform_device_register(&gpmc_onenand_device) < 0)
		printk(KERN_ERR "Unable to register OneNAND device\n");
}
/*
 * omap_rng_remove - tear down the OMAP hardware RNG
 * @pdev: RNG platform device
 *
 * Mirrors probe: unregister from the hwrng core, clear RNG_MASK_REG
 * (set to 0x1 at probe), release the 24xx interface clock if one was
 * taken, and release the MMIO region stashed in drvdata by probe.
 */
static int __exit omap_rng_remove(struct platform_device *pdev)
{
	struct resource *mem = dev_get_drvdata(&pdev->dev);

	hwrng_unregister(&omap_rng_ops);

	omap_rng_write_reg(RNG_MASK_REG, 0x0);

	if (cpu_is_omap24xx()) {
		clk_disable(rng_ick);
		clk_put(rng_ick);
	}

	release_resource(mem);
	rng_base = NULL;

	return 0;
}