/*
 * combiner_init() - set up the EXYNOS interrupt combiner block.
 * @combiner_base: ioremapped base of the combiner registers.
 * @np: combiner device-tree node, or NULL on non-DT boots.
 *
 * Allocates linux irq descriptors for every combiner input, registers a
 * legacy irq domain and cascades each combiner channel into its parent SPI.
 *
 * Fix: the original declared and computed a local 'soc_max_nr' that was
 * never read (the rest of the function uses 'max_nr') -- the dead variable
 * and its computation are removed.
 */
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq, irq_base;
	unsigned int nr_irq;

	if (np) {
		/* DT boot: combiner count comes from the node property. */
		if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
			pr_warning("%s: number of combiners not specified, "
				"setting default as %d.\n",
				__func__, EXYNOS4_MAX_COMBINER_NR);
			max_nr = EXYNOS4_MAX_COMBINER_NR;
		}
	} else {
		/* Non-DT boot: derive the combiner count from the SoC. */
		max_nr = soc_is_exynos5250() ?
			EXYNOS5_MAX_COMBINER_NR : EXYNOS4_MAX_COMBINER_NR;
	}
	nr_irq = max_nr * MAX_IRQ_IN_COMBINER;

	irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
	if (IS_ERR_VALUE(irq_base)) {
		/* Descriptor allocation failed; fall back to a fixed base. */
		irq_base = COMBINER_IRQ(0, 0);
		pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n",
			__func__, irq_base);
	}

	combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
				&combiner_irq_domain_ops, &combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warning("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		/* Four combiner channels share each 0x10-byte register group. */
		combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
		irq = IRQ_SPI(i);
#ifdef CONFIG_OF
		if (np)
			irq = irq_of_parse_and_map(np, i);
#endif
		combiner_cascade_irq(i, irq);
	}

#ifdef CONFIG_PM
	/* Setup suspend/resume combiner saving */
	cpu_pm_register_notifier(&combiner_notifier_block);
#endif
}
/*
 * Register board platform data for a DesignWare MMC controller and fill in
 * default callbacks for any hooks the board left NULL.
 */
void __init exynos_dwmci_set_platdata(struct dw_mci_board *pd, u32 slot_id)
{
	struct dw_mci_board *npd = NULL;

	if (soc_is_exynos4210() || soc_is_exynos4212() || soc_is_exynos4412()) {
		npd = s3c_set_platdata(pd, sizeof(struct dw_mci_board),
				       &exynos4_device_dwmci);
	} else if (soc_is_exynos5250()) {
		if (slot_id >= ARRAY_SIZE(exynos5_dwmci_devs)) {
			pr_err("%s: slot %d is not supported\n",
			       __func__, slot_id);
			return;
		}
		npd = s3c_set_platdata(pd, sizeof(struct dw_mci_board),
				       exynos5_dwmci_devs[slot_id]);
	}

	if (!npd)
		return;

	/* Supply default implementations for unset board callbacks. */
	if (!npd->init)
		npd->init = exynos_dwmci_init;
	if (!npd->get_bus_wd)
		npd->get_bus_wd = exynos_dwmci_get_bus_wd;
	if (!npd->set_io_timing)
		npd->set_io_timing = exynos_dwmci_set_io_timing;
	if (!npd->get_ocr)
		npd->get_ocr = exynos_dwmci_get_ocr;
}
int s3c64xx_spi0_cfg_gpio(struct platform_device *dev) { int gpio; if (soc_is_exynos5410()) { s3c_gpio_cfgpin(EXYNOS5410_GPA2(0), S3C_GPIO_SFN(2)); s3c_gpio_setpull(EXYNOS5410_GPA2(0), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS5410_GPA2(2), 2, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); for (gpio = EXYNOS5410_GPA2(0); gpio < EXYNOS5410_GPA2(4); gpio++) s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV3); } else if (soc_is_exynos5250()) { s3c_gpio_cfgpin(EXYNOS5_GPA2(0), S3C_GPIO_SFN(2)); s3c_gpio_setpull(EXYNOS5_GPA2(0), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS5_GPA2(2), 2, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); for (gpio = EXYNOS5_GPA2(0); gpio < EXYNOS5_GPA2(4); gpio++) s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV3); } else { s3c_gpio_cfgpin(EXYNOS4_GPB(0), S3C_GPIO_SFN(2)); s3c_gpio_setpull(EXYNOS4_GPB(0), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS4_GPB(2), 2, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); for (gpio = EXYNOS4_GPB(0); gpio < EXYNOS4_GPB(4); gpio++) s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV3); } return 0; }
/* Configure the I2C4 pin pair; the bank and pin function vary per SoC. */
void s3c_i2c4_cfg_gpio(struct platform_device *dev)
{
	if (soc_is_exynos4210()) {
		s3c_gpio_cfgall_range(EXYNOS4_GPB(2), 2,
				      S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
		return;
	}
	if (soc_is_exynos4212() || soc_is_exynos4412() || soc_is_exynos4415()) {
		s3c_gpio_cfgall_range(EXYNOS4_GPB(0), 2,
				      S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
		return;
	}
	if (soc_is_exynos5250()) {
		s3c_gpio_cfgall_range(EXYNOS5_GPA2(0), 2,
				      S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
		return;
	}
	if (soc_is_exynos5260()) {
		/* 5260 uses function 2, unlike the others. */
		s3c_gpio_cfgall_range(EXYNOS5260_GPB5(0), 2,
				      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
		return;
	}
	if (soc_is_exynos3250()) {
		s3c_gpio_cfgall_range(EXYNOS3_GPB(0), 2,
				      S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
		return;
	}

	pr_err("failed to configure gpio for i2c4\n");
}
/*
 * Select the per-SoC PMU configuration table and apply one-off PMU fixes.
 * Defaults to the 4210 table when no branch below matches.
 */
static int __init exynos_pmu_init(void)
{
	unsigned int tmp;

	exynos_pmu_config = exynos4210_pmu_config;

	if (soc_is_exynos4210()) {
		exynos_pmu_config = exynos4210_pmu_config;
		pr_info("EXYNOS4210 PMU Initialize\n");
	} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
		exynos_pmu_config = exynos4x12_pmu_config;
		pr_info("EXYNOS4x12 PMU Initialize\n");
	} else if (soc_is_exynos5250()) {
		/*
		 * When SYS_WDTRESET is set, watchdog timer reset request
		 * is ignored by power management unit -- clear it in both
		 * registers so watchdog resets work.
		 */
		tmp = __raw_readl(EXYNOS5_AUTO_WDTRESET_DISABLE);
		tmp &= ~EXYNOS5_SYS_WDTRESET;
		__raw_writel(tmp, EXYNOS5_AUTO_WDTRESET_DISABLE);

		tmp = __raw_readl(EXYNOS5_MASK_WDTRESET_REQUEST);
		tmp &= ~EXYNOS5_SYS_WDTRESET;
		__raw_writel(tmp, EXYNOS5_MASK_WDTRESET_REQUEST);

		exynos_pmu_config = exynos5250_pmu_config;
		pr_info("EXYNOS5250 PMU Initialize\n");
	} else {
		pr_info("EXYNOS: PMU not supported\n");
	}

	return 0;
}
/*
 * Select the per-SoC PMU configuration table and apply one-off PMU setup.
 * Falls back to the 4210 table when no SoC branch matches.
 */
static int __init exynos_pmu_init(void)
{
	unsigned int value;

	exynos_pmu_config = exynos4210_pmu_config;

	if (soc_is_exynos3250()) {
		/*
		 * Prevent the L2 memory system from issuing new bus
		 * requests: when core status is powered down, '1' must be
		 * set for L2 power down.
		 */
		value = __raw_readl(EXYNOS3_ARM_COMMON_OPTION);
		value |= EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
		__raw_writel(value, EXYNOS3_ARM_COMMON_OPTION);

		/* Enable USE_STANDBY_WFI for all CORE */
		__raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);

		/* Set the PSHOLD port for output high. */
		value = __raw_readl(S5P_PS_HOLD_CONTROL);
		value |= S5P_PS_HOLD_OUTPUT_HIGH;
		__raw_writel(value, S5P_PS_HOLD_CONTROL);

		/* Enable the signal for the PSHOLD port. */
		value = __raw_readl(S5P_PS_HOLD_CONTROL);
		value |= S5P_PS_HOLD_EN;
		__raw_writel(value, S5P_PS_HOLD_CONTROL);

		exynos_pmu_config = exynos3250_pmu_config;
		pr_info("EXYNOS3250 PMU Initialize\n");
	} else if (soc_is_exynos4210()) {
		exynos_pmu_config = exynos4210_pmu_config;
		pr_info("EXYNOS4210 PMU Initialize\n");
	} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
		exynos_pmu_config = exynos4x12_pmu_config;
		pr_info("EXYNOS4x12 PMU Initialize\n");
	} else if (soc_is_exynos5250()) {
		/*
		 * When SYS_WDTRESET is set, watchdog timer reset request
		 * is ignored by power management unit.
		 */
		value = __raw_readl(EXYNOS5_AUTO_WDTRESET_DISABLE);
		value &= ~EXYNOS5_SYS_WDTRESET;
		__raw_writel(value, EXYNOS5_AUTO_WDTRESET_DISABLE);

		value = __raw_readl(EXYNOS5_MASK_WDTRESET_REQUEST);
		value &= ~EXYNOS5_SYS_WDTRESET;
		__raw_writel(value, EXYNOS5_MASK_WDTRESET_REQUEST);

		exynos_pmu_config = exynos5250_pmu_config;
		pr_info("EXYNOS5250 PMU Initialize\n");
	} else {
		pr_info("EXYNOS: PMU not supported\n");
	}

	return 0;
}
void s5p_tv_setup(void) { int ret; /* direct HPD to HDMI chip */ if (soc_is_exynos4412()) { gpio_request(GPIO_HDMI_HPD, "hpd-plug"); gpio_direction_input(GPIO_HDMI_HPD); s3c_gpio_cfgpin(GPIO_HDMI_HPD, S3C_GPIO_SFN(0x3)); s3c_gpio_setpull(GPIO_HDMI_HPD, S3C_GPIO_PULL_NONE); } else if (soc_is_exynos5250()) { gpio_request(GPIO_HDMI_HPD, "hpd-plug"); gpio_direction_input(GPIO_HDMI_HPD); s3c_gpio_cfgpin(GPIO_HDMI_HPD, S3C_GPIO_SFN(0x3)); s3c_gpio_setpull(GPIO_HDMI_HPD, S3C_GPIO_PULL_NONE); /* HDMI CEC */ gpio_request(GPIO_HDMI_CEC, "hdmi-cec"); gpio_direction_input(GPIO_HDMI_CEC); s3c_gpio_cfgpin(GPIO_HDMI_CEC, S3C_GPIO_SFN(0x3)); s3c_gpio_setpull(GPIO_HDMI_CEC, S3C_GPIO_PULL_NONE); } else { printk(KERN_ERR "HPD GPIOs are not defined!\n"); } }
/* Configure the I2C1 pin pair; the pin bank differs per SoC generation. */
void s3c_i2c1_cfg_gpio(struct platform_device *dev)
{
	if (soc_is_exynos5250()) {
		s3c_gpio_cfgall_range(EXYNOS5_GPB3(2), 2,
				      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
		return;
	}
	if (soc_is_exynos5260()) {
		s3c_gpio_cfgall_range(EXYNOS5260_GPB4(2), 2,
				      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
		return;
	}
	if (soc_is_exynos5410()) {
		s3c_gpio_cfgall_range(EXYNOS5410_GPB3(2), 2,
				      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
		return;
	}
	if (soc_is_exynos5420()) {
		s3c_gpio_cfgall_range(EXYNOS5420_GPB3(2), 2,
				      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
		return;
	}
	if (soc_is_exynos3250()) {
		s3c_gpio_cfgall_range(EXYNOS3_GPD1(2), 2,
				      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
		return;
	}

	/* EXYNOS4210, EXYNOS4212, and EXYNOS4412 */
	s3c_gpio_cfgall_range(EXYNOS4_GPD1(2), 2,
			      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
void s3c_adc_phy_init(void) { u32 reg; if (soc_is_exynos5250() || soc_is_exynos4415() || soc_is_exynos3470() || soc_is_exynos3250()) { reg = __raw_readl(EXYNOS5250_ADC_PHY_CONTROL); reg |= EXYNOS5_ADC_PHY_ENABLE; __raw_writel(reg, EXYNOS5250_ADC_PHY_CONTROL); } else if (soc_is_exynos5410() || soc_is_exynos5420()) { reg = __raw_readl(EXYNOS5410_ADC_PHY_CONTROL); reg |= EXYNOS5_ADC_PHY_ENABLE; __raw_writel(reg, EXYNOS5410_ADC_PHY_CONTROL); } else if (soc_is_exynos5260()) { /* ADC phy select */ reg = readl(EXYNOS5260_SYSCON_PERI_PHY_SELECT); if (reg & EXYNOS5260_SYSCON_PERI_PHY_SELECT_ISP_ONLY) reg &= ~(EXYNOS5260_SYSCON_PERI_PHY_SELECT_ISP_ONLY); writel(reg, EXYNOS5260_SYSCON_PERI_PHY_SELECT); reg = __raw_readl(EXYNOS5260_ADC_PHY_CONTROL); reg |= EXYNOS5_ADC_PHY_ENABLE; __raw_writel(reg, EXYNOS5260_ADC_PHY_CONTROL); } }
int s3c_irq_wake(struct irq_data *data, unsigned int state) { unsigned long irqbit; unsigned int irq_rtc_tic, irq_rtc_alarm; #ifdef CONFIG_ARCH_EXYNOS if (soc_is_exynos5250()) { irq_rtc_tic = EXYNOS5_IRQ_RTC_TIC; irq_rtc_alarm = EXYNOS5_IRQ_RTC_ALARM; } else { irq_rtc_tic = EXYNOS4_IRQ_RTC_TIC; irq_rtc_alarm = EXYNOS4_IRQ_RTC_ALARM; } #else irq_rtc_tic = IRQ_RTC_TIC; irq_rtc_alarm = IRQ_RTC_ALARM; #endif if (data->irq == irq_rtc_tic || data->irq == irq_rtc_alarm) { irqbit = 1 << (data->irq + 1 - irq_rtc_alarm); if (!state) s3c_irqwake_intmask |= irqbit; else s3c_irqwake_intmask &= ~irqbit; } else { return -ENOENT; } return 0; }
/*
 * Determine how many cores this SoC has, clip it to the kernel limit and
 * mark each one possible before wiring up the cross-call hook.
 */
void __init smp_init_cpus(void)
{
	void __iomem *scu = scu_base_addr();
	unsigned int cpu, core_count;

	/* Known dual/quad core SoCs; otherwise ask the SCU (or assume 1). */
	if (soc_is_exynos4210() || soc_is_exynos4212() || soc_is_exynos5250())
		core_count = 2;
	else if (soc_is_exynos4412() || soc_is_exynos5410())
		core_count = 4;
	else
		core_count = scu ? scu_get_core_count(scu) : 1;

	/* sanity check */
	if (core_count > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			core_count, nr_cpu_ids);
		core_count = nr_cpu_ids;
	}

	for (cpu = 0; cpu < core_count; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
static int exynos_cfg_i2s_gpio(struct platform_device *pdev) { /* configure GPIO for i2s port */ struct exynos_gpio_cfg exynos4_cfg[3] = { { EXYNOS4_GPZ(0), 7, S3C_GPIO_SFN(2) }, { EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(2) }, { EXYNOS4_GPC1(0), 5, S3C_GPIO_SFN(2) } }; struct exynos_gpio_cfg exynos5_cfg[3] = { { EXYNOS5_GPZ(0), 7, S3C_GPIO_SFN(2) }, { EXYNOS5_GPB0(0), 5, S3C_GPIO_SFN(2) }, { EXYNOS5_GPB1(0), 5, S3C_GPIO_SFN(2) } }; if (pdev->id < 0 || pdev->id > 2) { printk(KERN_ERR "Invalid Device %d\n", pdev->id); return -EINVAL; } if (soc_is_exynos4210() || soc_is_exynos4212() || soc_is_exynos4412()) s3c_gpio_cfgpin_range(exynos4_cfg[pdev->id].addr, exynos4_cfg[pdev->id].num, exynos4_cfg[pdev->id].bit); else if (soc_is_exynos5250()) s3c_gpio_cfgpin_range(exynos5_cfg[pdev->id].addr, exynos5_cfg[pdev->id].num, exynos5_cfg[pdev->id].bit); return 0; }
void exynos_sys_powerdown_conf(enum sys_powerdown mode) { unsigned int i; if (soc_is_exynos3250()) { exynos3250_init_pmu(); if (mode == SYS_SLEEP) { __raw_writel(0x00000BB8, EXYNOS3_XUSBXTI_DURATION); __raw_writel(0x00000BB8, EXYNOS3_XXTI_DURATION); __raw_writel(0x00001D4C, EXYNOS3_EXT_REGULATOR_DURATION); __raw_writel(0x00001D4C, EXYNOS3_EXT_REGULATOR_COREBLK_DURATION); } } if (soc_is_exynos5250()) exynos5_init_pmu(); for (i = 0; (exynos_pmu_config[i].reg != PMU_TABLE_END) ; i++) __raw_writel(exynos_pmu_config[i].val[mode], exynos_pmu_config[i].reg); if (soc_is_exynos4412()) { for (i = 0; exynos4412_pmu_config[i].reg != PMU_TABLE_END ; i++) __raw_writel(exynos4412_pmu_config[i].val[mode], exynos4412_pmu_config[i].reg); } }
/*
 * Unregister the watchdog cpufreq transition notifier.  Skipped on
 * exynos5250, mirroring the registration path.
 */
static inline void s3c2410wdt_cpufreq_deregister(void)
{
	if (!soc_is_exynos5250())
		cpufreq_unregister_notifier(&s3c2410wdt_cpufreq_transition_nb,
					    CPUFREQ_TRANSITION_NOTIFIER);
}
/* Configure the I2C0 pin pair on EXYNOS4 parts. */
void s3c_i2c0_cfg_gpio(struct platform_device *dev)
{
	/* exynos5250: will be implemented with gpio function */
	if (!soc_is_exynos5250())
		s3c_gpio_cfgall_range(EXYNOS4_GPD1(0), 2,
				      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
/*
 * Configure the I2C6 pin pair as special function 4 with pull-ups;
 * EXYNOS5 parts use GPB1(3..4), everything else GPC1(3..4).
 *
 * NOTE(review): soc_is_exynos5210() looks suspicious -- no EXYNOS5210
 * appears elsewhere in this code; confirm it is not a typo for another
 * EXYNOS5 variant before relying on this branch.
 */
void s3c_i2c6_cfg_gpio(struct platform_device *dev)
{
	if (soc_is_exynos5210() || soc_is_exynos5250())
		s3c_gpio_cfgall_range(EXYNOS5_GPB1(3), 2,
				      S3C_GPIO_SFN(4), S3C_GPIO_PULL_UP);
	else
		s3c_gpio_cfgall_range(EXYNOS4_GPC1(3), 2,
				      S3C_GPIO_SFN(4), S3C_GPIO_PULL_UP);
}
/**
 * setup_sysmmu_owner
 * - make a relationship between System MMU and its master device
 *
 * This function changes the device hierarchy of both the System MMU and
 * its master device that is specified by platform_set_sysmmu().
 * It must be ensured that this function is called after both the System MMU
 * and its master device are registered.
 * It must also be ensured that this function is called before both devices
 * are probe()d, since it is not correct to change the hierarchy of a
 * probe()d device.
 */
static int __init setup_sysmmu_owner(void)
{
	/* Pick the SoC-generation-specific System MMU initializer. */
	if (soc_is_exynos5250() || soc_is_exynos5410())
		exynos5_sysmmu_init();
	else if (soc_is_exynos4412() || soc_is_exynos4212())
		exynos4_sysmmu_init();

	return 0;
}
/* Configure the I2C0 pin pair; nothing to do on exynos5250. */
void s3c_i2c0_cfg_gpio(struct platform_device *dev)
{
	if (!soc_is_exynos5250())
		s3c_gpio_cfgall_range(EXYNOS4_GPD1(0), 2,
				      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
/*
 * Record the G-Scaler IP version in every instance's default platform data.
 * Instances 2 and 3 exist only on 5250/5410.
 */
void __init exynos5_gsc_set_ip_ver(enum gsc_ip_version ver)
{
	int has_gsc23 = soc_is_exynos5250() || soc_is_exynos5410();

	exynos_gsc0_default_data.ip_ver = ver;
	exynos_gsc1_default_data.ip_ver = ver;

	if (has_gsc23) {
		exynos_gsc2_default_data.ip_ver = ver;
		exynos_gsc3_default_data.ip_ver = ver;
	}
}
/*
 * Gate the G-Scaler clock on/off; on exynos5250 with a sensor attached,
 * the sensor's camera clock is toggled alongside it.
 */
static void gsc_set_cam_clock(struct gsc_dev *gsc, bool on)
{
	struct v4l2_subdev *sensor_sd = gsc->pipeline.sensor;
	struct gsc_sensor_info *s_info = NULL;

	if (sensor_sd)
		s_info = v4l2_get_subdev_hostdata(sensor_sd);

	if (on) {
		clk_enable(gsc->clock[CLK_GATE]);
		if (sensor_sd && soc_is_exynos5250())
			clk_enable(s_info->camclk);
	} else {
		clk_disable(gsc->clock[CLK_GATE]);
		if (sensor_sd && soc_is_exynos5250())
			clk_disable(s_info->camclk);
	}
}
/* Configure the I2C0 pin pair; the bank differs per SoC generation. */
void s3c_i2c0_cfg_gpio(struct platform_device *dev)
{
	if (soc_is_exynos5250()) {
		s3c_gpio_cfgall_range(EXYNOS5_GPB3(0), 2,
				      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
		return;
	}

	/* EXYNOS4210, EXYNOS4212, EXYNOS4412 and EXYNOS4270 */
	s3c_gpio_cfgall_range(EXYNOS4_GPD1(0), 2,
			      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
}
/* Configure the SPDIF pin pair; only EXYNOS4 and EXYNOS5250 are handled. */
static int exynos_spdif_cfg_gpio(struct platform_device *pdev)
{
	unsigned int base;
	int matched = 1;

	if (soc_is_exynos4210() || soc_is_exynos4212() || soc_is_exynos4412())
		base = EXYNOS4_GPC1(0);
	else if (soc_is_exynos5250())
		base = EXYNOS5_GPB1(0);
	else
		matched = 0;	/* other SoCs: silently no-op, as before */

	if (matched)
		s3c_gpio_cfgpin_range(base, 2, S3C_GPIO_SFN(4));

	return 0;
}
/* Configure the HS-I2C2 pin pair; only 5250 and 5410 are supported. */
void exynos5_hs_i2c2_cfg_gpio(struct platform_device *dev)
{
	if (soc_is_exynos5250()) {
		s3c_gpio_cfgall_range(EXYNOS5_GPA0(6), 2,
				      S3C_GPIO_SFN(4), S3C_GPIO_PULL_UP);
		return;
	}
	if (soc_is_exynos5410()) {
		s3c_gpio_cfgall_range(EXYNOS5410_GPB1(3), 2,
				      S3C_GPIO_SFN(4), S3C_GPIO_PULL_UP);
		return;
	}

	pr_err("failed to configure gpio for hs-i2c2\n");
}
/*
 * Map the external-interrupt controller and wire up all 32 EINT lines:
 * EINT16..31 share one demux parent, EINT0..15 each cascade from their own
 * parent interrupt.
 */
static int __init exynos_init_irq_eint(void)
{
	int n;

	exynos_eint_base = soc_is_exynos5250() ?
		ioremap(EXYNOS5_PA_GPIO1, SZ_4K) :
		ioremap(EXYNOS4_PA_GPIO2, SZ_4K);

	if (exynos_eint_base == NULL) {
		pr_err("unable to ioremap for EINT base address\n");
		return -ENOMEM;
	}

	for (n = 0; n <= 31; n++) {
		irq_set_chip_and_handler(IRQ_EINT(n), &exynos_irq_eint,
					 handle_level_irq);
		set_irq_flags(IRQ_EINT(n), IRQF_VALID);
	}

	irq_set_chained_handler(EXYNOS_IRQ_EINT16_31,
				exynos_irq_demux_eint16_31);

	for (n = 0; n <= 15; n++) {
		unsigned int parent;

		eint0_15_data[n] = IRQ_EINT(n);

		parent = soc_is_exynos5250() ?
			exynos5_eint0_15_src_int[n] :
			exynos4_eint0_15_src_int[n];
		irq_set_handler_data(parent, &eint0_15_data[n]);
		irq_set_chained_handler(parent, exynos_irq_eint0_15);
	}

	return 0;
}
/* Dump every mixer register to the debug log (second mixer on 5250 only). */
static void mxr_reg_mxr_dump(struct mxr_device *mdev)
{
/* Log one named register's current value via the device debug channel. */
#define DUMPREG(reg_id) \
do { \
	mxr_dbg(mdev, #reg_id " = %08x\n", \
		(u32)readl(mdev->res.mxr_regs + reg_id)); \
} while (0)

	DUMPREG(MXR_STATUS);
	DUMPREG(MXR_CFG);
	DUMPREG(MXR_INT_EN);
	DUMPREG(MXR_INT_STATUS);

	DUMPREG(MXR_LAYER_CFG);
	DUMPREG(MXR_VIDEO_CFG);

	DUMPREG(MXR_GRAPHIC0_CFG);
	DUMPREG(MXR_GRAPHIC0_BASE);
	DUMPREG(MXR_GRAPHIC0_SPAN);
	DUMPREG(MXR_GRAPHIC0_WH);
	DUMPREG(MXR_GRAPHIC0_SXY);
	DUMPREG(MXR_GRAPHIC0_DXY);

	DUMPREG(MXR_GRAPHIC1_CFG);
	DUMPREG(MXR_GRAPHIC1_BASE);
	DUMPREG(MXR_GRAPHIC1_SPAN);
	DUMPREG(MXR_GRAPHIC1_WH);
	DUMPREG(MXR_GRAPHIC1_SXY);
	DUMPREG(MXR_GRAPHIC1_DXY);

	/* The exynos5250 mixer has a second set of layer registers. */
	if (soc_is_exynos5250()) {
		DUMPREG(MXR1_LAYER_CFG);
		DUMPREG(MXR1_VIDEO_CFG);

		DUMPREG(MXR1_GRAPHIC0_CFG);
		DUMPREG(MXR1_GRAPHIC0_BASE);
		DUMPREG(MXR1_GRAPHIC0_SPAN);
		DUMPREG(MXR1_GRAPHIC0_WH);
		DUMPREG(MXR1_GRAPHIC0_SXY);
		DUMPREG(MXR1_GRAPHIC0_DXY);

		DUMPREG(MXR1_GRAPHIC1_CFG);
		DUMPREG(MXR1_GRAPHIC1_BASE);
		DUMPREG(MXR1_GRAPHIC1_SPAN);
		DUMPREG(MXR1_GRAPHIC1_WH);
		DUMPREG(MXR1_GRAPHIC1_SXY);
		DUMPREG(MXR1_GRAPHIC1_DXY);

		DUMPREG(MXR_TVOUT_CFG);
	}
#undef DUMPREG
}
/*
 * Register the serial port devices described by @cfg[0..no-1], marking
 * every port as using the fractional baud divider.
 */
static void __init exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
	u32 n;

	for (n = 0; n < no; n++)
		cfg[n].has_fracval = 1;

	if (soc_is_exynos5250())
		s3c24xx_init_uartdevs("exynos4210-uart",
				      exynos5_uart_resources, cfg, no);
	else
		s3c24xx_init_uartdevs("exynos4210-uart",
				      exynos4_uart_resources, cfg, no);
}
/*
 * Store the MIF/INT PM-QoS floors in every G-Scaler instance's default
 * platform data.  Instances 2 and 3 exist only on 5250/5410.
 */
void __init exynos5_gsc_set_pm_qos_val(u32 mif_min, u32 int_min)
{
	int has_gsc23 = soc_is_exynos5250() || soc_is_exynos5410();

	exynos_gsc0_default_data.mif_min = mif_min;
	exynos_gsc0_default_data.int_min = int_min;
	exynos_gsc1_default_data.mif_min = mif_min;
	exynos_gsc1_default_data.int_min = int_min;

	if (has_gsc23) {
		exynos_gsc2_default_data.mif_min = mif_min;
		exynos_gsc2_default_data.int_min = int_min;
		exynos_gsc3_default_data.mif_min = mif_min;
		exynos_gsc3_default_data.int_min = int_min;
	}
}
/*
 * Register the PDMA0/PDMA1/MDMA1 AMBA DMA controllers with per-SoC
 * peripheral tables (skipped entirely on device-tree boots).
 *
 * Fix: in the exynos5250 branch the original assigned
 * exynos_pdma0_device.irq[0] three times (copy-paste), leaving PDMA1 and
 * MDMA1 without their interrupts and overwriting PDMA0's irq with
 * EXYNOS5_IRQ_MDMA1.  Each device now gets its own irq.
 */
static int __init exynos_dma_init(void)
{
	if (of_have_populated_dt())
		return 0;

	if (soc_is_exynos4210()) {
		exynos_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4210_pdma0_peri);
		exynos_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
		exynos_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4210_pdma1_peri);
		exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
	} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
		exynos_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4212_pdma0_peri);
		exynos_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
		exynos_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos4212_pdma1_peri);
		exynos_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
	} else if (soc_is_exynos5250()) {
		exynos_pdma0_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos5250_pdma0_peri);
		exynos_pdma0_pdata.peri_id = exynos5250_pdma0_peri;
		exynos_pdma1_pdata.nr_valid_peri =
			ARRAY_SIZE(exynos5250_pdma1_peri);
		exynos_pdma1_pdata.peri_id = exynos5250_pdma1_peri;

		/* EXYNOS5 moves the controllers; patch addresses and irqs. */
		exynos_pdma0_device.res.start = EXYNOS5_PA_PDMA0;
		exynos_pdma0_device.res.end = EXYNOS5_PA_PDMA0 + SZ_4K;
		exynos_pdma0_device.irq[0] = EXYNOS5_IRQ_PDMA0;
		exynos_pdma1_device.res.start = EXYNOS5_PA_PDMA1;
		exynos_pdma1_device.res.end = EXYNOS5_PA_PDMA1 + SZ_4K;
		exynos_pdma1_device.irq[0] = EXYNOS5_IRQ_PDMA1;
		exynos_mdma1_device.res.start = EXYNOS5_PA_MDMA1;
		exynos_mdma1_device.res.end = EXYNOS5_PA_MDMA1 + SZ_4K;
		exynos_mdma1_device.irq[0] = EXYNOS5_IRQ_MDMA1;
	}

	dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask);
	dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask);
	amba_device_register(&exynos_pdma0_device, &iomem_resource);

	dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask);
	dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask);
	amba_device_register(&exynos_pdma1_device, &iomem_resource);

	dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask);
	amba_device_register(&exynos_mdma1_device, &iomem_resource);

	return 0;
}
static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type) { int offs = EINT_OFFSET(data->irq); int shift; u32 ctrl, mask; u32 newvalue = 0; switch (type) { case IRQ_TYPE_EDGE_RISING: newvalue = S5P_IRQ_TYPE_EDGE_RISING; break; case IRQ_TYPE_EDGE_FALLING: newvalue = S5P_IRQ_TYPE_EDGE_FALLING; break; case IRQ_TYPE_EDGE_BOTH: newvalue = S5P_IRQ_TYPE_EDGE_BOTH; break; case IRQ_TYPE_LEVEL_LOW: newvalue = S5P_IRQ_TYPE_LEVEL_LOW; break; case IRQ_TYPE_LEVEL_HIGH: newvalue = S5P_IRQ_TYPE_LEVEL_HIGH; break; default: printk(KERN_ERR "No such irq type %d", type); return -EINVAL; } shift = (offs & 0x7) * 4; mask = 0x7 << shift; spin_lock(&eint_lock); ctrl = __raw_readl(EINT_CON(exynos_eint_base, data->irq)); ctrl &= ~mask; ctrl |= newvalue << shift; __raw_writel(ctrl, EINT_CON(exynos_eint_base, data->irq)); spin_unlock(&eint_lock); if (soc_is_exynos5250()) s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf)); else s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf)); return 0; }
/*
 * Initialize the L2 (PL310/L2X0) outer cache on EXYNOS4 and save its
 * register settings for restore after a power transition.  exynos5250 has
 * no L2X0, so it bails out immediately.  Tries device-tree init first and
 * falls back to programming the controller directly when the cache is not
 * already enabled.
 */
static int __init exynos4_l2x0_cache_init(void)
{
	int ret;

	if (soc_is_exynos5250())
		return 0;

	/* Preferred path: configure from the device tree. */
	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	/* Program the controller only while it is still disabled. */
	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		/* 4x12 parts take a different data latency than 4210. */
		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
			     S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
			     S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
			     S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
			     S5P_VA_L2CC + L2X0_POWER_CTRL);

		/* Flush the saved state so resume code can read it with
		 * caches off. */
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}