/*
 * Initialize the PL310 (L2C-310) outer cache controller.
 *
 * Resolves the L2CC register base for the detected SoC, programs the
 * way size into the auxiliary control value and registers the ux500
 * specific disable/invalidate callbacks in outer_cache.
 *
 * Returns 0 on success; panics via ux500_unknown_soc() on unknown SoCs.
 */
static int __init ux500_l2x0_init(void)
{
	/* Base aux value: 8-way associativity, force write-allocate (see mask below). */
	uint32_t aux_val = 0x3e000000;

	/* Pick the statically mapped L2CC base for this SoC. */
	if (cpu_is_u5500())
		l2x0_base = __io_address(U5500_L2CC_BASE);
	else if (cpu_is_u8500() || cpu_is_u9540())
		l2x0_base = __io_address(U8500_L2CC_BASE);
	else
		ux500_unknown_soc();

	/* u9540's L2 has 128KB way size */
	if (cpu_is_u9540())
		aux_val |= (0x4 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT); /* 128KB way size */
	else
		aux_val |= (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT); /* 64KB way size */

	/* 8 way associativity, force WA */
	l2x0_init(l2x0_base, aux_val, 0xc0000fff);

	/* Override invalidate function */
	outer_cache.disable = ux500_l2x0_disable;
	outer_cache.inv_all = ux500_l2x0_inv_all;

	return 0;
}
/*
 * FIXME: Should we set up the GPIO domain here?
 *
 * The problem is that we cannot put the interrupt resources into the platform
 * device until the irqdomain has been added. Right now, we set the GIC interrupt
 * domain from init_irq(), then load the gpio driver from
 * core_initcall(nmk_gpio_init) and add the platform devices from
 * arch_initcall(customize_machine).
 *
 * This feels fragile because it depends on the gpio device getting probed
 * _before_ any device uses the gpio interrupts.
 */
void __init ux500_init_irq(void)
{
	struct device_node *np;
	struct resource r;

	irqchip_init();

	/* Locate the PRCMU register window from the device tree. */
	np = of_find_compatible_node(NULL, NULL, "stericsson,db8500-prcmu");
	if (!np) {
		pr_err("could not find PRCMU base resource\n");
		return;
	}
	/*
	 * Fix: check the return value of of_address_to_resource(); on
	 * failure 'r' was previously left uninitialized and then read
	 * through r.start (undefined behavior).
	 */
	if (of_address_to_resource(np, 0, &r)) {
		of_node_put(np);
		pr_err("could not find PRCMU base resource\n");
		return;
	}
	of_node_put(np);

	prcmu_early_init(r.start, r.end - r.start);
	ux500_pm_init(r.start, r.end - r.start);

	/*
	 * Init clocks here so that they are available for system timer
	 * initialization.
	 */
	if (cpu_is_u8500_family())
		u8500_clk_init();
	else if (cpu_is_u9540())
		u9540_clk_init();
	else if (cpu_is_u8540())
		u8540_clk_init();
}
/* * FIXME: Should we set up the GPIO domain here? * * The problem is that we cannot put the interrupt resources into the platform * device until the irqdomain has been added. Right now, we set the GIC interrupt * domain from init_irq(), then load the gpio driver from * core_initcall(nmk_gpio_init) and add the platform devices from * arch_initcall(customize_machine). * * This feels fragile because it depends on the gpio device getting probed * _before_ any device uses the gpio interrupts. */ void __init ux500_init_irq(void) { gic_arch_extn.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND; irqchip_init(); /* * Init clocks here so that they are available for system timer * initialization. */ if (cpu_is_u8500_family()) { prcmu_early_init(U8500_PRCMU_BASE, SZ_8K - 1); ux500_pm_init(U8500_PRCMU_BASE, SZ_8K - 1); u8500_of_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, U8500_CLKRST6_BASE); } else if (cpu_is_u9540()) { prcmu_early_init(U8500_PRCMU_BASE, SZ_8K - 1); ux500_pm_init(U8500_PRCMU_BASE, SZ_8K - 1); u9540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, U8500_CLKRST6_BASE); } else if (cpu_is_u8540()) { prcmu_early_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1); ux500_pm_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1); u8540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, U8500_CLKRST6_BASE); } }
/*
 * Install the static I/O mappings for DB8500/DB9540 and initialize the
 * L2 cache early.  The UART mapping must come first so low-level debug
 * output keeps working across the remap.
 */
void __init u8500_map_io(void)
{
	/*
	 * Map the UARTs early so that the DEBUG_LL stuff continues to work.
	 */
	iotable_init(u8500_uart_io_desc, ARRAY_SIZE(u8500_uart_io_desc));

	ux500_map_io();

	iotable_init(u8500_common_io_desc, ARRAY_SIZE(u8500_common_io_desc));

	/* u9540 has its own descriptor table; all other DB8500-class use u8500's. */
	if (cpu_is_u9540())
		iotable_init(u9540_io_desc, ARRAY_SIZE(u9540_io_desc));
	else
		iotable_init(u8500_io_desc, ARRAY_SIZE(u8500_io_desc));

	_PRCMU_BASE = __io_address(U8500_PRCMU_BASE);

#ifdef CONFIG_CACHE_L2X0
	/* L2 cache may already be enabled and must be initialized for
	 * ramdumping to work. This is the earliest possible place. */
	ux500_l2x0_init();
#endif
}
/*
 * Wake the secondary CPU out of the boot-ROM WFE loop by publishing its
 * startup address and the wake-up magic in backup RAM.
 */
static void __init wakeup_secondary(void)
{
	void __iomem *backupram;

	if (cpu_is_u5500())
		backupram = __io_address(U5500_BACKUPRAM0_BASE);
	else if (cpu_is_u8500() || cpu_is_u9540())
		backupram = __io_address(U8500_BACKUPRAM0_BASE);
	else
		ux500_unknown_soc();

	/*
	 * write the address of secondary startup into the backup ram register
	 * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the
	 * backup ram register at offset 0x1FF0, which is what boot rom code
	 * is waiting for. This would wake up the secondary core from WFE
	 */
#define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4
	__raw_writel(virt_to_phys(u8500_secondary_startup),
		     backupram + UX500_CPU1_JUMPADDR_OFFSET);

#define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0
	__raw_writel(0xA1FEED01,
		     backupram + UX500_CPU1_WAKEMAGIC_OFFSET);

	/* make sure write buffer is drained */
	mb();
}
/*
 * Allocate the coherent per-CPU "last I/O" debug records and resolve the
 * L2CC base used by the last-I/O debug machinery.
 *
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int __init ux500_debug_last_io_init(void)
{
	size_t size;

	size = sizeof(struct ux500_debug_last_io) * num_possible_cpus();
	ux500_last_io = dma_alloc_coherent(NULL, size, &ux500_last_io_phys,
					   GFP_KERNEL);
	if (!ux500_last_io) {
		/*
		 * Fix: the failure branch contained a stray empty statement
		 * and failed silently; report the failure.
		 */
		pr_err("%s: could not allocate %zu bytes\n", __func__, size);
		return -ENOMEM;
	}

	if (cpu_is_u5500())
		l2x0_base = __io_address(U5500_L2CC_BASE);
	else if (cpu_is_u8500() || cpu_is_u9540())
		l2x0_base = __io_address(U8500_L2CC_BASE);
	/*
	 * NOTE(review): unlike sibling functions there is no
	 * ux500_unknown_soc() fallback here, so an unknown SoC leaves
	 * l2x0_base untouched — confirm that is intended.
	 */

	/*
	 * CONFIG_UX500_DEBUG_LAST_IO is only intended for debugging.
	 * It should not be left enabled.
	 */
	WARN_ON(1);

	return 0;
}
/**
 * prcmu_early_init() - register the SoC-specific PRCMU handlers
 *
 * Selects the dbx540 or db8500 PRCMU implementation, initializes the
 * dbx500 PRCMU context and registers each fops entry from the returned
 * table (early ops, value accessors, output accessors).  Out-of-range
 * entries are reported via dbx500_prcmu_error() and skipped.
 */
void __init prcmu_early_init(void)
{
	int i;
	struct prcmu_fops_register_data *data;

	if (cpu_is_u9540())
		data = dbx540_prcmu_early_init();
	else
		data = db8500_prcmu_early_init();

	/* Nothing to register if the SoC backend gave us no table. */
	if (data == NULL)
		return;

	dbx500_prcmu_init_ctx();

	for (i = 0; i < data->size; i++) {
		switch (data->tab[i].fops) {
		case PRCMU_EARLY:
			dbx500_prcmu_context.pearly = data->tab[i].data.pearly;
			break;
		case PRCMU_VAL:
			dbx500_prcmu_register_pval(data->tab[i].data.pval,
						   data->tab[i].size);
			break;
		case PRCMU_OUT:
			dbx500_prcmu_register_pout(data->tab[i].data.pout,
						   data->tab[i].size);
			break;
		default:
			/*
			 * Fix: the original assigned ret = -EIO here but the
			 * value was never used (the function returns void);
			 * the error report below is the actual handling.
			 */
			dbx500_prcmu_error("ops out of range");
			break;
		}
	}
}
/*
 * Bring up the GIC for the detected SoC, re-couple it after a possible
 * watchdog reboot, and do the early PRCMU/clock initialization needed
 * before the system timer comes up.
 */
void __init ux500_init_irq(void)
{
	void __iomem *dist_base;
	void __iomem *cpu_base;

	/* Route wake-up configuration requests through the ux500 handler. */
	gic_arch_extn.irq_set_wake = ux500_gic_irq_set_wake;

	if (cpu_is_u5500()) {
		dist_base = __io_address(U5500_GIC_DIST_BASE);
		cpu_base = __io_address(U5500_GIC_CPU_BASE);
	} else if (cpu_is_u8500() || cpu_is_u9540()) {
		dist_base = __io_address(U8500_GIC_DIST_BASE);
		cpu_base = __io_address(U8500_GIC_CPU_BASE);
	} else
		ux500_unknown_soc();

	gic_init(0, 29, dist_base, cpu_base);

	/*
	 * On WD reboot gic is in some cases decoupled.
	 * This will make sure that the GIC is correctly configured.
	 */
	ux500_pm_gic_recouple();

	/*
	 * Init clocks here so that they are available for system timer
	 * initialization.
	 */
	prcmu_early_init();

	/* backwards compatible */
	if (!arm_pm_restart)
		arm_pm_restart = ux500_restart;
	clk_init();
}
/*
 * Register the display devices appropriate for the running SoC.
 * SoCs that are neither u8500 nor u9540 have nothing to register.
 */
static int __init init_display_devices(void)
{
	int ret = 0;

	if (cpu_is_u8500())
		ret = handle_display_devices_in_u8500();
	else if (cpu_is_u9540())
		ret = handle_display_devices_in_u9540();

	return ret;
}
/*
 * Return the virtual base address of the Snoop Control Unit for the
 * detected SoC, or NULL after flagging an unknown SoC.
 */
static void __iomem *scu_base_addr(void)
{
	void __iomem *base = NULL;

	if (cpu_is_u5500())
		base = __io_address(U5500_SCU_BASE);
	else if (cpu_is_u8500() || cpu_is_u9540())
		base = __io_address(U8500_SCU_BASE);
	else
		ux500_unknown_soc();

	return base;
}
/*
 * Enable runtime control of the PL310 prefetch settings through a
 * TrustZone (TEE) service: open a session towards the secure-side
 * application that owns the L2 cache controller and hook the prefetch
 * enable/disable callbacks into outer_cache.
 *
 * Returns 0 on success or -EINVAL on any TEE failure (the context is
 * finalized again on the session-open error path).
 */
static int __init prefetch_ctrl_init(void)
{
	int err;
	int origin_err;

	/* Selects trustzone application needed for the job. */
	struct tee_uuid static_uuid = {
		L2X0_UUID_TEE_TA_START_LOW,
		L2X0_UUID_TEE_TA_START_MID,
		L2X0_UUID_TEE_TA_START_HIGH,
		L2X0_UUID_TEE_TA_START_CLOCKSEQ,
	};

	/* Get PL310 base address. It will be used as readonly. */
	if (cpu_is_u5500())
		l2x0_base = __io_address(U5500_L2CC_BASE);
	else if (cpu_is_u8500() || cpu_is_u9540())
		l2x0_base = __io_address(U8500_L2CC_BASE);
	else
		ux500_unknown_soc();

	err = teec_initialize_context(NULL, &context);
	if (err) {
		pr_err("l2x0-prefetch: unable to initialize tee context,"
		       " err = %d\n", err);
		err = -EINVAL;
		goto error0;
	}

	err = teec_open_session(&context, &session, &static_uuid,
				TEEC_LOGIN_PUBLIC, NULL, NULL, &origin_err);
	if (err) {
		pr_err("l2x0-prefetch: unable to open tee session,"
		       " tee error = %d, origin error = %d\n",
		       err, origin_err);
		err = -EINVAL;
		goto error1;
	}

	/* Expose the prefetch knobs to the rest of the kernel. */
	outer_cache.prefetch_enable = prefetch_enable;
	outer_cache.prefetch_disable = prefetch_disable;

	pr_info("l2x0-prefetch: initialized.\n");

	return 0;

error1:
	(void)teec_finalize_context(&context);
error0:
	return err;
}
/*
 * Restore VAPE context
 *
 * Units are restored in the reverse order of context_vape_save(), and
 * the APE notifier chain is told afterwards that restore is complete.
 */
void context_vape_restore(void)
{
	restore_prcc();
	restore_tpiu();
	restore_stm_ape();

	if (cpu_is_u5500())
		u5500_context_restore_icn();
	if (cpu_is_u8500())
		u8500_context_restore_icn();
	if (cpu_is_u9540())
		u9540_context_restore_icn();

	atomic_notifier_call_chain(&context_ape_notifier_list,
				   CONTEXT_APE_RESTORE, NULL);
}
/*
 * Save VAPE context
 *
 * Notifies the APE chain first so listeners can quiesce, then saves the
 * interconnect context for the detected SoC, followed by STM, TPIU and
 * PRCC state (restored in reverse order by context_vape_restore()).
 */
void context_vape_save(void)
{
	atomic_notifier_call_chain(&context_ape_notifier_list,
				   CONTEXT_APE_SAVE, NULL);

	if (cpu_is_u5500())
		u5500_context_save_icn();
	if (cpu_is_u8500())
		u8500_context_save_icn();
	if (cpu_is_u9540())
		u9540_context_save_icn();

	save_stm_ape();
	save_tpiu();
	save_prcc();
}
/*
 * Install the static I/O mappings for DB8500/DB9540.  The UART mapping
 * must come first so low-level debug output keeps working across the
 * remap.
 */
void __init u8500_map_io(void)
{
	/*
	 * Map the UARTs early so that the DEBUG_LL stuff continues to work.
	 */
	iotable_init(u8500_uart_io_desc, ARRAY_SIZE(u8500_uart_io_desc));

	ux500_map_io();

	iotable_init(u8500_common_io_desc, ARRAY_SIZE(u8500_common_io_desc));

	/* u9540 has its own descriptor table; all other DB8500-class use u8500's. */
	if (cpu_is_u9540())
		iotable_init(u9540_io_desc, ARRAY_SIZE(u9540_io_desc));
	else
		iotable_init(u8500_io_desc, ARRAY_SIZE(u8500_io_desc));

	_PRCMU_BASE = __io_address(U8500_PRCMU_BASE);
}
/*
 * FIXME: Should we set up the GPIO domain here?
 *
 * The problem is that we cannot put the interrupt resources into the platform
 * device until the irqdomain has been added. Right now, we set the GIC interrupt
 * domain from init_irq(), then load the gpio driver from
 * core_initcall(nmk_gpio_init) and add the platform devices from
 * arch_initcall(customize_machine).
 *
 * This feels fragile because it depends on the gpio device getting probed
 * _before_ any device uses the gpio interrupts.
 */
void __init ux500_init_irq(void)
{
	void __iomem *dist_base;
	void __iomem *cpu_base;

	gic_arch_extn.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;

	if (cpu_is_u8500_family() || cpu_is_ux540_family()) {
		dist_base = __io_address(U8500_GIC_DIST_BASE);
		cpu_base = __io_address(U8500_GIC_CPU_BASE);
	} else
		ux500_unknown_soc();

#ifdef CONFIG_OF
	/* Prefer DT-driven irqchip setup when a populated tree exists. */
	if (of_have_populated_dt())
		irqchip_init();
	else
#endif
		gic_init(0, 29, dist_base, cpu_base);

	/*
	 * Init clocks here so that they are available for system timer
	 * initialization.
	 */
	if (cpu_is_u8500_family()) {
		prcmu_early_init(U8500_PRCMU_BASE, SZ_8K - 1);
		ux500_pm_init(U8500_PRCMU_BASE, SZ_8K - 1);
		u8500_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE,
			       U8500_CLKRST3_BASE, U8500_CLKRST5_BASE,
			       U8500_CLKRST6_BASE);
	} else if (cpu_is_u9540()) {
		prcmu_early_init(U8500_PRCMU_BASE, SZ_8K - 1);
		ux500_pm_init(U8500_PRCMU_BASE, SZ_8K - 1);
		/*
		 * NOTE(review): this branch calls u8500_clk_init() rather
		 * than a u9540-specific initializer — confirm this is
		 * intentional and not a copy/paste slip.
		 */
		u8500_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE,
			       U8500_CLKRST3_BASE, U8500_CLKRST5_BASE,
			       U8500_CLKRST6_BASE);
	} else if (cpu_is_u8540()) {
		/* The u8540 PRCMU window is 12K rather than 8K. */
		prcmu_early_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1);
		ux500_pm_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1);
		/*
		 * NOTE(review): u8540_clk_init() takes no arguments here,
		 * unlike the other branches — verify the signature.
		 */
		u8540_clk_init();
	}
}
int init_config(void) { if (cfgMpcSDRAMCodeSize_SVA == 0 || cfgMpcSDRAMCodeSize_SIA == 0) { pr_err("SDRAM code size must be greater than 0\n"); return -EINVAL; } if (cfgMpcSDRAMDataSize_SVA == 0) { pr_err("SDRAM data size for SVA must be greater than 0\n"); return -EINVAL; } if (cpu_is_u9540()) { osalEnv.esram_base_phys = U9540_ESRAM_CM_BASE; cfgESRAMSize = U9540_ESRAM_CM_SIZE; } else { osalEnv.esram_base_phys = U8500_ESRAM_CM_BASE; cfgESRAMSize = U8500_ESRAM_CM_SIZE; } osalEnv.mpc[SVA].nbYramBanks = cfgMpcYBanks_SVA; osalEnv.mpc[SVA].eeId = cfgSchedulerTypeHybrid_SVA ? HYBRID_EXECUTIVE_ENGINE : SYNCHRONOUS_EXECUTIVE_ENGINE; osalEnv.mpc[SVA].sdram_code.size = cfgMpcSDRAMCodeSize_SVA * ONE_KB; osalEnv.mpc[SVA].sdram_data.size = cfgMpcSDRAMDataSize_SVA * ONE_KB; osalEnv.mpc[SVA].base.size = 128*ONE_KB; //we expose only TCM24 init_waitqueue_head(&osalEnv.mpc[SVA].trace_waitq); osalEnv.mpc[SIA].nbYramBanks = cfgMpcYBanks_SIA; osalEnv.mpc[SIA].eeId = cfgSchedulerTypeHybrid_SIA ? HYBRID_EXECUTIVE_ENGINE : SYNCHRONOUS_EXECUTIVE_ENGINE; osalEnv.mpc[SIA].sdram_code.size = cfgMpcSDRAMCodeSize_SIA * ONE_KB; osalEnv.mpc[SIA].sdram_data.size = cfgMpcSDRAMDataSize_SIA * ONE_KB; osalEnv.mpc[SIA].base.size = 128*ONE_KB; //we expose only TCM24 init_waitqueue_head(&osalEnv.mpc[SIA].trace_waitq); return 0; }
/*
 * Allocate the RAM backup area used by the boot ROM across deep sleep,
 * publish its physical address in backup RAM, map all the hardware units
 * whose context must be saved/restored (TPIU, STM, SCU, PRCC clusters,
 * GIC) for the detected SoC, resolve the PERIPHx clocks, and run the
 * SoC-specific context init.
 *
 * Returns 0 on success, -ENOMEM if the backup pages cannot be allocated,
 * -EINVAL on unknown hardware.
 */
static int __init context_init(void)
{
	int i;
	void __iomem *ux500_backup_ptr;

	/* allocate backup pointer for RAM data */
	ux500_backup_ptr = (void *)__get_free_pages(GFP_KERNEL,
						    get_order(U8500_BACKUPRAM_SIZE));

	if (!ux500_backup_ptr) {
		pr_warning("context: could not allocate backup memory\n");
		return -ENOMEM;
	}

	/*
	 * ROM code addresses to store backup contents,
	 * pass the physical address of back up to ROM code
	 */
	writel(virt_to_phys(ux500_backup_ptr),
	       IO_ADDRESS(U8500_EXT_RAM_LOC_BACKUPRAM_ADDR));

	if (cpu_is_u5500()) {
		/*
		 * NOTE(review): this branch stores into the U8500_-named
		 * backup-RAM slots; presumably those offsets are shared
		 * across SoCs — confirm against the header definitions.
		 */
		writel(IO_ADDRESS(U5500_PUBLIC_BOOT_ROM_BASE),
		       IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
		writel(IO_ADDRESS(U5500_PUBLIC_BOOT_ROM_BASE),
		       IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));

		context_tpiu.base = ioremap(U5500_TPIU_BASE, SZ_4K);
		context_stm_ape.base = ioremap(U5500_STM_REG_BASE, SZ_4K);
		context_scu.base = ioremap(U5500_SCU_BASE, SZ_4K);

		context_prcc[0].base = ioremap(U5500_CLKRST1_BASE, SZ_4K);
		context_prcc[1].base = ioremap(U5500_CLKRST2_BASE, SZ_4K);
		context_prcc[2].base = ioremap(U5500_CLKRST3_BASE, SZ_4K);
		context_prcc[3].base = ioremap(U5500_CLKRST5_BASE, SZ_4K);
		context_prcc[4].base = ioremap(U5500_CLKRST6_BASE, SZ_4K);

		context_gic_dist_common.base = ioremap(U5500_GIC_DIST_BASE, SZ_4K);
		per_cpu(context_gic_cpu, 0).base = ioremap(U5500_GIC_CPU_BASE, SZ_4K);
	} else if (cpu_is_u8500() || cpu_is_u9540()) {
		/* Give logical address to backup RAM. For both CPUs */
		if (cpu_is_u9540()) {
			writel(IO_ADDRESS_DB9540_ROM(U9540_PUBLIC_BOOT_ROM_BASE),
			       IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
			writel(IO_ADDRESS_DB9540_ROM(U9540_PUBLIC_BOOT_ROM_BASE),
			       IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
		} else {
			writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
			       IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
			writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
			       IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
		}

		context_tpiu.base = ioremap(U8500_TPIU_BASE, SZ_4K);
		context_stm_ape.base = ioremap(U8500_STM_REG_BASE, SZ_4K);
		context_scu.base = ioremap(U8500_SCU_BASE, SZ_4K);

		/* PERIPH4 is always on, so no need saving prcc */
		context_prcc[0].base = ioremap(U8500_CLKRST1_BASE, SZ_4K);
		context_prcc[1].base = ioremap(U8500_CLKRST2_BASE, SZ_4K);
		context_prcc[2].base = ioremap(U8500_CLKRST3_BASE, SZ_4K);
		context_prcc[3].base = ioremap(U8500_CLKRST5_BASE, SZ_4K);
		context_prcc[4].base = ioremap(U8500_CLKRST6_BASE, SZ_4K);

		context_gic_dist_common.base = ioremap(U8500_GIC_DIST_BASE, SZ_4K);
		per_cpu(context_gic_cpu, 0).base = ioremap(U8500_GIC_CPU_BASE, SZ_4K);
	}

	/* CPU0's distributor view doubles as the common mapping. */
	per_cpu(context_gic_dist_cpu, 0).base = context_gic_dist_common.base;

	/* All secondary CPUs share CPU0's GIC mappings. */
	for (i = 1; i < num_possible_cpus(); i++) {
		per_cpu(context_gic_cpu, i).base =
			per_cpu(context_gic_cpu, 0).base;
		per_cpu(context_gic_dist_cpu, i).base =
			per_cpu(context_gic_dist_cpu, 0).base;
	}

	for (i = 0; i < ARRAY_SIZE(context_prcc); i++) {
		const int clusters[] = {1, 2, 3, 5, 6};
		char clkname[10];

		snprintf(clkname, sizeof(clkname), "PERIPH%d", clusters[i]);

		context_prcc[i].clk = clk_get_sys(clkname, NULL);
		BUG_ON(IS_ERR(context_prcc[i].clk));
	}

	if (cpu_is_u8500()) {
		u8500_context_init();
	} else if (cpu_is_u5500()) {
		u5500_context_init();
	} else if (cpu_is_u9540()) {
		u9540_context_init();
	} else {
		printk(KERN_ERR "context: unknown hardware!\n");
		/*
		 * Fix: the backup pages were previously leaked on this error
		 * path.  The physical address already written to backup RAM
		 * becomes stale, but on unknown hardware it is never
		 * consumed.
		 */
		free_pages((unsigned long)ux500_backup_ptr,
			   get_order(U8500_BACKUPRAM_SIZE));
		return -EINVAL;
	}

	return 0;
}