/*
 * imx6q_init_irq - i.MX6Q early IRQ-time initialization.
 *
 * Brings up the PL310 L2 cache from DT (aux_val 0 with an all-ones mask
 * keeps the controller's default/DT-provided configuration), initializes
 * the SRC and GPC blocks, then probes the interrupt controllers listed
 * in imx6q_irq_match.
 */
static void __init imx6q_init_irq(void)
{
	/* aux_val 0 / mask ~0UL: take the L2 configuration as-is from DT */
	l2x0_of_init(0, ~0UL);
	imx_src_init();
	imx_gpc_init();
	of_irq_init(imx6q_irq_match);
}
/*
 * kona_l2_cache_init - enable the L2 cache through the secure monitor.
 *
 * The L2 controller on Kona parts can only be enabled from secure world,
 * so a Secure Monitor Call is issued first; l2x0_of_init() then merely
 * attaches the kernel to the already-enabled cache.  Bails out (with a
 * message) if the secure API is unavailable or the SMC fails.
 */
void __init kona_l2_cache_init(void)
{
	int err;
	unsigned int smc_rc;

	err = bcm_kona_smc_init();
	if (err) {
		pr_info("Secure API not available (%d). Skipping L2 init.\n",
			err);
		return;
	}

	smc_rc = bcm_kona_smc(SSAPI_ENABLE_L2_CACHE, 0, 0, 0, 0);
	if (smc_rc != SEC_ROM_RET_OK) {
		pr_err("Secure Monitor call failed (%u)! Skipping L2 init.\n",
		       smc_rc);
		return;
	}

	/*
	 * The aux_val and aux_mask have no effect since L2 cache is already
	 * enabled. Pass 0s for aux_val and 1s for aux_mask for default value.
	 */
	err = l2x0_of_init(0, ~0);
	if (err)
		pr_err("Couldn't enable L2 cache: %d\n", err);
}
/*
 * imx6q_init_irq - i.MX6Q early IRQ-time initialization.
 *
 * Brings up the PL310 L2 cache from DT (aux_val 0 with an all-ones mask
 * keeps the controller's default/DT-provided configuration), initializes
 * the SRC and GPC blocks, then probes all DT-declared interrupt
 * controllers via irqchip_init().
 */
static void __init imx6q_init_irq(void)
{
	/* aux_val 0 / mask ~0UL: take the L2 configuration as-is from DT */
	l2x0_of_init(0, ~0UL);
	imx_src_init();
	imx_gpc_init();
	irqchip_init();
}
/*
 * v2m_dt_init - Versatile Express DT machine init.
 *
 * Configures the PL310 L2 cache (aux_val 0x00400000 sets bit 22 — the
 * shared-attribute override, judging by L2X0_AUX_CTRL_SHARE_OVERRIDE usage
 * elsewhere; the mask preserves the hardware way-size/associativity bits),
 * populates platform devices from DT with the V2M auxdata table, and
 * installs the board power-off hook.
 */
static void __init v2m_dt_init(void)
{
	l2x0_of_init(0x00400000, 0xfe0fffff);
	of_platform_populate(NULL, of_default_bus_match_table,
			     v2m_dt_auxdata_lookup, NULL);
	pm_power_off = v2m_power_off;
}
/**
 * xilinx_init_machine() - System specific initialization, intended to be
 * called from board specific initialization.
 *
 * Probes DT platform buses matching zynq_of_bus_ids, optionally sets up
 * the PL310 L2 cache, then registers the Zynq platform devices.
 */
void __init xilinx_init_machine(void)
{
	of_platform_bus_probe(NULL, zynq_of_bus_ids, NULL);

#ifdef CONFIG_CACHE_L2X0
	/*
	 * 64KB way size, 8-way associativity, parity disabled, prefetching option
	 * (the two aux values below differ only in the prefetch-related bits).
	 */
#ifndef CONFIG_XILINX_L2_PREFETCH
	l2x0_of_init(0x02060000, 0xF0F0FFFF);
#else
	l2x0_of_init(0x72060000, 0xF0F0FFFF);
#endif
#endif

	platform_device_init();
}
/*
 * meson_cache_of_init - set up the L2 cache from DT.
 *
 * aux_val 0 with an all-ones mask keeps the controller's default/DT
 * configuration.  Returns 0 on success or the l2x0_of_init() error code.
 */
static int __init meson_cache_of_init(void)
{
	int aux = 0; /* put some default aux setting here */

	/*
	 * Propagate the result instead of silently discarding it, so a
	 * failed L2 setup is reported by the initcall machinery.
	 */
	return l2x0_of_init(aux, ~0);
}
/*
 * stih41x_l2x0_init - configure and enable the PL310 L2 cache.
 *
 * Builds the auxiliary control value (shared-attribute override, data and
 * instruction prefetch enabled, way-size field set to 0x4) and hands it to
 * the DT-based L2 init with the full aux mask.
 */
void __init stih41x_l2x0_init(void)
{
	const u32 way_size = 0x4;
	u32 aux;

	/* may be this can be encoded in macros like BIT*() */
	aux = way_size << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
	aux |= 0x1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT;
	aux |= 0x1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT;
	aux |= 0x1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT;

	l2x0_of_init(aux, L2X0_AUX_CTRL_MASK);
}
/*
 * ux500_l2x0_init - enable the L2 cache on u8500/ux540 parts.
 *
 * Returns -ENODEV on other SoCs (multiplatform kernels call this
 * unconditionally), otherwise unlocks the controller, installs the
 * secure write hook, and initializes the cache from DT.
 */
static int __init ux500_l2x0_init(void)
{
	/* Multiplatform guard (redundant double parentheses removed) */
	if (!(cpu_is_u8500_family() || cpu_is_ux540_family()))
		return -ENODEV;

	/* Unlock before init */
	ux500_l2x0_unlock();

	outer_cache.write_sec = ux500_l2c310_write_sec;

	/*
	 * Propagate the l2x0_of_init() result instead of discarding it,
	 * so a failed L2 setup is visible to the initcall machinery.
	 */
	return l2x0_of_init(0, ~0);
}
/*
 * exynos4_l2x0_cache_init - set up the Exynos4 L2 cache and the register
 * snapshot used to restore it after a power transition.
 *
 * Exynos5250 has no L2X0, so it is skipped.  The DT path is tried first;
 * if it succeeds only the physical address of the saved-register block is
 * published (and cleaned to DRAM so resume firmware can read it).  The
 * non-DT fallback programs latency/prefetch/power registers directly —
 * but only while the controller is still disabled — then calls the legacy
 * l2x0_init().
 */
static int __init exynos4_l2x0_cache_init(void)
{
	int ret;

	if (soc_is_exynos5250())
		return 0;

	/* Preferred path: configuration comes from the device tree. */
	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		/* Clean to DRAM: resume code reads this with caches off. */
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	/* Fallback: program the controller only if it is not yet enabled. */
	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		/* 4212/4412 need one extra data-latency cycle. */
		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
			     S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
			     S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
			     S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
			     S5P_VA_L2CC + L2X0_POWER_CTRL);

		/* Clean both the pointer and the snapshot itself to DRAM. */
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}
/*
 * kona_l2_cache_init - enable the L2 cache through the secure monitor.
 *
 * The L2 controller can only be enabled from secure world, so an SMC is
 * issued first; l2x0_of_init() then attaches the kernel to the cache.
 * Returns 0 when L2X0 support is compiled out, otherwise the
 * l2x0_of_init() result.
 */
static int __init kona_l2_cache_init(void)
{
	int ret;

	if (!IS_ENABLED(CONFIG_CACHE_L2X0))
		return 0;

	bcm_kona_smc(SSAPI_ENABLE_L2_CACHE, 0, 0, 0, 0);

	/*
	 * The aux_val and aux_mask have no effect since L2 cache is already
	 * enabled. Pass 0s for aux_val and 1s for aux_mask for default value.
	 */
	ret = l2x0_of_init(0, ~0);
	if (ret)
		pr_err("Couldn't enable L2 cache: %d\n", ret);

	return ret;
}
/*
 * tegra_init_cache - set up the PL310 L2 cache on Tegra.
 *
 * Derives part of the auxiliary control value from the controller's own
 * CACHE_TYPE register (bits 8-10 shifted into the aux field at bit 17),
 * ORs in the fixed Tegra settings, and initializes the cache from DT.
 * On success the physical address of the saved-register block is
 * published for the suspend/resume path.
 */
static void __init tegra_init_cache(void)
{
#ifdef CONFIG_CACHE_L2X0
	void __iomem *l2_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
	u32 ctype;
	u32 aux;
	int err;

	ctype = readl(l2_base + L2X0_CACHE_TYPE);
	aux = 0x7C400001 | ((ctype & 0x700) << (17 - 8));

	err = l2x0_of_init(aux, 0x8200c3fe);
	if (!err)
		l2x0_saved_regs_addr = virt_to_phys(&l2x0_saved_regs);
#endif
}
/*
 * imx_init_l2cache - configure and enable the PL310 L2 cache on i.MX6.
 *
 * Maps the controller from DT to pre-program the PREFETCH and POWER
 * registers (including the erratum 765569 prefetch-offset setting and the
 * erratum 752271 double-linefill workaround, applied by RTL revision),
 * then unmaps it and lets l2x0_of_init() do the generic setup.  If the DT
 * node or mapping is missing, the register tweaks are skipped but the
 * generic init still runs.
 */
void __init imx_init_l2cache(void)
{
	void __iomem *l2x0_base;
	struct device_node *np;
	unsigned int val, cache_id;

	np = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
	if (!np)
		goto out;

	l2x0_base = of_iomap(np, 0);
	if (!l2x0_base) {
		of_node_put(np);
		goto out;
	}

	/* Configure the L2 PREFETCH and POWER registers */
	/* Set prefetch offset with any value except 23 as per errata 765569 */
	val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
	val |= 0x7000000f;

	/*
	 * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0
	 * The L2 cache controller(PL310) version on the i.MX6DL/SOLO/SL/SX/DQP
	 * is r3p2.
	 * But according to ARM PL310 errata: 752271
	 * ID: 752271: Double linefill feature can cause data corruption
	 * Fault Status: Present in: r3p0, r3p1, r3p1-50rel0. Fixed in r3p2
	 * Workaround: The only workaround to this erratum is to disable the
	 * double linefill feature. This is the default behavior.
	 */
	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	if (((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310)
	    && ((cache_id & L2X0_CACHE_ID_RTL_MASK) < L2X0_CACHE_ID_RTL_R3P2))
		/* Bit 30 is the double-linefill enable; clear it. */
		val &= ~(1 << 30);
	writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL);

	val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN;
	writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);

	iounmap(l2x0_base);
	of_node_put(np);

out:
	/* aux_val 0 / mask ~0UL: keep the configuration programmed above */
	l2x0_of_init(0, ~0UL);
}
/*
 * init_IRQ - architecture IRQ bring-up entry point.
 *
 * Uses the generic DT irqchip probe unless the machine descriptor
 * provides its own init_irq hook.  Afterwards, if the machine declares
 * L2C aux settings, the outer (L2) cache is initialized from DT, wiring
 * in the machine's secure-write hook first when no other one is set.
 */
void __init init_IRQ(void)
{
	int ret;

	if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
		irqchip_init();
	else
		machine_desc->init_irq();

	/* A non-zero aux val or mask is the opt-in signal for L2C init. */
	if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_CACHE_L2X0) &&
	    (machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)) {
		if (!outer_cache.write_sec)
			outer_cache.write_sec = machine_desc->l2c_write_sec;
		ret = l2x0_of_init(machine_desc->l2c_aux_val,
				   machine_desc->l2c_aux_mask);
		if (ret)
			pr_err("L2C: failed to init: %d\n", ret);
	}
}
/*
 * imx_init_l2cache - configure and enable the PL310 L2 cache on i.MX6.
 *
 * Maps the controller from DT to pre-program the PREFETCH control
 * register (applying the erratum 752271 double-linefill workaround on
 * i.MX6Q), then unmaps it and lets l2x0_of_init() do the generic setup.
 * If the DT node or mapping is missing, the tweak is skipped but the
 * generic init still runs.
 */
void __init imx_init_l2cache(void)
{
	void __iomem *l2x0_base;
	struct device_node *np;
	unsigned int val;

	np = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
	if (!np)
		goto out;

	l2x0_base = of_iomap(np, 0);
	if (!l2x0_base) {
		of_node_put(np);
		goto out;
	}

	/* Configure the L2 PREFETCH register (POWER is left to defaults) */
	val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL);
	val |= 0x70800000;

	/*
	 * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0
	 * The L2 cache controller(PL310) version on the i.MX6DL/SOLO/SL is r3p2
	 * But according to ARM PL310 errata: 752271
	 * ID: 752271: Double linefill feature can cause data corruption
	 * Fault Status: Present in: r3p0, r3p1, r3p1-50rel0. Fixed in r3p2
	 * Workaround: The only workaround to this erratum is to disable the
	 * double linefill feature. This is the default behavior.
	 */
	if (cpu_is_imx6q())
		val &= ~(1 << 30 | 1 << 23);
	writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL);

	iounmap(l2x0_base);
	of_node_put(np);

out:
	/* aux_val 0 / mask ~0: keep the configuration programmed above */
	l2x0_of_init(0, ~0);
}
/*
 * armada_370_xp_init_early - early init for Armada 370/XP.
 *
 * Picks the mbus compatible string for the SoC family, initializes the
 * mvebu-mbus driver with the static window bases/sizes, and sets up the
 * PL310 L2 cache from DT when L2X0 support is built in.
 */
void __init armada_370_xp_init_early(void)
{
	char *mbus_soc_name;

	/*
	 * This initialization will be replaced by a DT-based
	 * initialization once the mvebu-mbus driver gains DT support.
	 */
	mbus_soc_name = of_machine_is_compatible("marvell,armada370") ?
			"marvell,armada370-mbus" :
			"marvell,armadaxp-mbus";

	mvebu_mbus_init(mbus_soc_name,
			ARMADA_370_XP_MBUS_WINS_BASE,
			ARMADA_370_XP_MBUS_WINS_SIZE,
			ARMADA_370_XP_SDRAM_WINS_BASE,
			ARMADA_370_XP_SDRAM_WINS_SIZE);

#ifdef CONFIG_CACHE_L2X0
	/* aux_val 0 / mask ~0UL: keep the DT-provided L2 configuration */
	l2x0_of_init(0, ~0UL);
#endif
}
/*
 * ux500_l2x0_init - enable the L2 cache on u8500/ux540.
 *
 * Locates the controller, unlocks it, builds the aux value (prefetch
 * bits plus a way-size field that depends on the SoC family), and runs
 * either the DT-based or legacy init.  Finally clears outer_cache.disable
 * because the cache cannot be disabled from non-secure world.
 */
static int __init ux500_l2x0_init(void)
{
	u32 aux = 0x3e000000;

	if (cpu_is_u8500_family() || cpu_is_ux540_family())
		l2x0_base = __io_address(U8500_L2CC_BASE);
	else
		ux500_unknown_soc();

	/* Unlock before init */
	ux500_l2x0_unlock();

	/* DBx540's L2 has a 128KB way size; the others use 64KB. */
	aux |= (cpu_is_ux540_family() ? 0x4 : 0x3)
		<< L2X0_AUX_CTRL_WAY_SIZE_SHIFT;

	/* 8 way associativity, force WA */
	if (of_have_populated_dt())
		l2x0_of_init(aux, 0xc0000fff);
	else
		l2x0_init(l2x0_base, aux, 0xc0000fff);

	/*
	 * We can't disable l2 as we are in non secure mode, currently
	 * this seems be called only during kexec path. So let's
	 * override outer.disable with nasty assignment until we have
	 * some SMI service available.
	 */
	outer_cache.disable = NULL;

	return 0;
}
/*
 * kona_l2_cache_init - enable the L2 cache through the secure monitor.
 *
 * Does nothing when L2X0 support is compiled out.  Otherwise verifies the
 * secure API is reachable, issues the enable SMC, and attaches the kernel
 * to the (already enabled) controller via l2x0_of_init(), logging any
 * failure.
 */
void __init kona_l2_cache_init(void)
{
	int err;

	if (!IS_ENABLED(CONFIG_CACHE_L2X0))
		return;

	err = bcm_kona_smc_init();
	if (err) {
		pr_info("Secure API not available (%d). Skipping L2 init.\n",
			err);
		return;
	}

	bcm_kona_smc(SSAPI_ENABLE_L2_CACHE, 0, 0, 0, 0);

	/*
	 * The aux_val and aux_mask have no effect since L2 cache is already
	 * enabled. Pass 0s for aux_val and 1s for aux_mask for default value.
	 */
	err = l2x0_of_init(0, ~0);
	if (err)
		pr_err("Couldn't enable L2 cache: %d\n", err);
}
/*
 * msm9625_init_irq - MSM9625 IRQ-time initialization.
 *
 * Sets up the L2 cache from DT using the board's L2CC_AUX_CTRL value
 * (masked with the full aux-control mask), then probes the interrupt
 * controllers listed in irq_match.
 */
void __init msm9625_init_irq(void)
{
	l2x0_of_init(L2CC_AUX_CTRL, L2X0_AUX_CTRL_MASK);
	of_irq_init(irq_match);
}
/*
 * tegra_init_cache - set up the PL310 L2 cache on Tegra.
 *
 * No-op unless L2X0 support is built in.  The hard-coded aux value and
 * mask encode the Tegra cache configuration (prefetch/latency bits —
 * see the PL310 TRM for the field layout).
 */
static void __init tegra_init_cache(void)
{
#ifdef CONFIG_CACHE_L2X0
	l2x0_of_init(0x3c400001, 0xc20fc3fe);
#endif
}
/*
 * socfpga_cyclone5_init - Cyclone5 DT machine init.
 *
 * Initializes the L2 cache from DT (aux_val 0 with an all-ones mask keeps
 * the DT/default configuration), populates platform devices, then sets up
 * the SoC clocks.
 */
static void __init socfpga_cyclone5_init(void)
{
	l2x0_of_init(0, ~0UL);
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	socfpga_init_clocks();
}
/*
 * rockchip_dt_init - Rockchip DT machine init.
 *
 * Initializes the L2 cache from DT (aux_val 0 with an all-ones mask keeps
 * the DT/default configuration), then populates platform devices.
 */
static void __init rockchip_dt_init(void)
{
	l2x0_of_init(0, ~0UL);
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
/*
 * v2m_dt_init - Versatile Express DT machine init.
 *
 * Configures the PL310 L2 cache (aux_val 0x00400000 sets bit 22 — the
 * shared-attribute override, judging by L2X0_AUX_CTRL_SHARE_OVERRIDE usage
 * elsewhere; the mask preserves the hardware way-size/associativity bits),
 * then populates platform devices using the V2M bus match table.
 */
static void __init v2m_dt_init(void)
{
	l2x0_of_init(0x00400000, 0xfe0fffff);
	of_platform_populate(NULL, v2m_dt_bus_match, NULL, NULL);
}