/******************************************************************************
 * This function is invoked during warm boot. Invoke the PSCI library
 * warm boot entry point which takes care of Architectural and platform setup/
 * restore. Copy the relevant cpu_context register values to smc context which
 * will get programmed during `smc_exit`.
 *****************************************************************************/
void sp_min_warm_boot(void)
{
	smc_ctx_t *next_smc_ctx;
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	u_register_t ns_sctlr;

	psci_warmboot_entrypoint();

	smc_set_next_ctx(NON_SECURE);

	next_smc_ctx = smc_get_next_ctx();
	zeromem(next_smc_ctx, sizeof(smc_ctx_t));

	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
			next_smc_ctx);

	/* Temporarily set the NS bit to access NS SCTLR */
	write_scr(read_scr() | SCR_NS_BIT);
	isb();
	ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
	write_sctlr(ns_sctlr);
	isb();

	write_scr(read_scr() & ~SCR_NS_BIT);
	isb();
}
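/*
 * A minimal sketch, assuming the same accessors used above (read_scr,
 * write_scr, write_sctlr, isb): the SCR.NS toggle that the sp_min
 * routines repeat inline could be factored into a helper like this.
 * The helper name is hypothetical, not part of the code above.
 */
static void write_ns_sctlr(u_register_t ns_sctlr)
{
	/* Set SCR.NS so the SCTLR access hits the Non-secure bank */
	write_scr(read_scr() | SCR_NS_BIT);
	isb();

	write_sctlr(ns_sctlr);
	isb();

	/* Switch back to the Secure bank before returning */
	write_scr(read_scr() & ~SCR_NS_BIT);
	isb();
}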
void vm_enable_paging(void)
{
	u32_t sctlr;

	write_ttbcr(0);

	/* Set all Domains to Client */
	write_dacr(0x55555555);

	sctlr = read_sctlr();

	/* Enable MMU */
	sctlr |= SCTLR_M;

	/*
	 * TRE set to zero (default reset value): TEX[2:0] are used,
	 * plus C and B bits.
	 */
	sctlr &= ~SCTLR_TRE;

	/*
	 * AFE set to zero (default reset value): not using the
	 * simplified access permissions model.
	 */
	sctlr &= ~SCTLR_AFE;

	/* Enable instruction and data cache */
	sctlr |= SCTLR_C;
	sctlr |= SCTLR_I;

	write_sctlr(sctlr);
}
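/*
 * For reference, the SCTLR masks used above, assuming they follow the
 * ARMv7-A architectural bit positions (the header that actually
 * defines them is not shown here):
 */
#define SCTLR_M		(1U << 0)	/* MMU enable */
#define SCTLR_C		(1U << 2)	/* Data/unified cache enable */
#define SCTLR_I		(1U << 12)	/* Instruction cache enable */
#define SCTLR_TRE	(1U << 28)	/* TEX remap enable */
#define SCTLR_AFE	(1U << 29)	/* Access flag enable */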
/*******************************************************************************
 * This function invokes the PSCI library interface to initialize the
 * non-secure cpu context and copies the relevant cpu context register values
 * to smc context. These registers will get programmed during `smc_exit`.
 ******************************************************************************/
static void sp_min_prepare_next_image_entry(void)
{
	entry_point_info_t *next_image_info;
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	u_register_t ns_sctlr;

	/* Program system registers to proceed to non-secure */
	next_image_info = sp_min_plat_get_bl33_ep_info();
	assert(next_image_info);
	assert(NON_SECURE == GET_SECURITY_STATE(next_image_info->h.attr));

	INFO("SP_MIN: Preparing exit to normal world\n");

	psci_prepare_next_non_secure_ctx(next_image_info);
	smc_set_next_ctx(NON_SECURE);

	/* Copy r0, lr and spsr from cpu context to SMC context */
	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
			smc_get_next_ctx());

	/* Temporarily set the NS bit to access NS SCTLR */
	write_scr(read_scr() | SCR_NS_BIT);
	isb();
	ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
	write_sctlr(ns_sctlr);
	isb();

	write_scr(read_scr() & ~SCR_NS_BIT);
	isb();
}
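/*
 * Based on the comment above ("Copy r0, lr and spsr from cpu context
 * to SMC context"), a sketch of what copy_cpu_ctx_to_smc_stx is
 * expected to do. The smc_ctx_t field names (r0, lr_mon, spsr_mon) are
 * assumptions for illustration, not taken from this file.
 */
static void copy_cpu_ctx_to_smc_stx_sketch(const regs_t *cpu_reg_ctx,
					   smc_ctx_t *next_smc_ctx)
{
	next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
	next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
	next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
}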
int __init arch_cpu_irq_setup(void)
{
	static const struct cpu_page zero_filled_cpu_page = { 0 };

	int rc;
	extern u32 _start_vect[];
	u32 *vectors, *vectors_data;
	u32 vec;
	struct cpu_page vec_page;

#if defined(CONFIG_ARM32_HIGHVEC)
	/* Enable high vectors in SCTLR */
	write_sctlr(read_sctlr() | SCTLR_V_MASK);
	vectors = (u32 *)CPU_IRQ_HIGHVEC_BASE;
#else
#if defined(CONFIG_ARMV7A_SECUREX)
	write_vbar(CPU_IRQ_LOWVEC_BASE);
#endif
	vectors = (u32 *)CPU_IRQ_LOWVEC_BASE;
#endif
	vectors_data = vectors + CPU_IRQ_NR;

	/* If vectors are at correct location then do nothing */
	if ((u32)_start_vect == (u32)vectors) {
		return VMM_OK;
	}

	/* If vectors are not mapped in virtual memory then map them. */
	vec_page = zero_filled_cpu_page;
	rc = cpu_mmu_get_reserved_page((virtual_addr_t)vectors, &vec_page);
	if (rc) {
		rc = vmm_host_ram_alloc(&vec_page.pa,
					TTBL_L2TBL_SMALL_PAGE_SIZE,
					TRUE);
		if (rc) {
			return rc;
		}
		vec_page.va = (virtual_addr_t)vectors;
		vec_page.sz = TTBL_L2TBL_SMALL_PAGE_SIZE;
		vec_page.dom = TTBL_L1TBL_TTE_DOM_RESERVED;
		vec_page.ap = TTBL_AP_SRW_U;
		if ((rc = cpu_mmu_map_reserved_page(&vec_page))) {
			return rc;
		}
	}

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < CPU_IRQ_NR; vec++) {
		vectors[vec] = _start_vect[vec];
		vectors_data[vec] = _start_vect[vec + CPU_IRQ_NR];
	}

	return VMM_OK;
}
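/*
 * Sketch of the vector page layout the copy loop above assumes:
 * CPU_IRQ_NR instruction slots (typically "ldr pc, [pc, #offset]")
 * followed by CPU_IRQ_NR literal words holding handler addresses.
 * Because each instruction's offset to its literal is preserved when
 * both halves are copied together, the PC-relative loads keep working
 * at the new location. The helper below is hypothetical: it patches a
 * handler by rewriting the literal word only.
 */
static void set_vector_handler(u32 *vectors, u32 vec, u32 handler)
{
	u32 *vectors_data = vectors + CPU_IRQ_NR;

	/* The "ldr pc" instruction in slot 'vec' fetches this word */
	vectors_data[vec] = handler;
}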
void bootblock_soc_init(void)
{
	uint32_t sctlr;

	/* enable dcache */
	sctlr = read_sctlr();
	sctlr |= SCTLR_C;
	write_sctlr(sctlr);
}
void main(void)
{
	const char *stage_name = "fallback/romstage";
	void *entry;
	uint32_t sctlr;

	/*
	 * Globally disable MMU, caches, and branch prediction (these
	 * should be disabled by default on reset).
	 */
	sctlr = read_sctlr();
	sctlr &= ~(SCTLR_M | SCTLR_C | SCTLR_Z | SCTLR_I);
	write_sctlr(sctlr);

	armv7_invalidate_caches();

	/*
	 * Re-enable caches and branch prediction. MMU will be set up
	 * later.
	 * Note: If booting from USB, we need to disable branch
	 * prediction before copying from USB into RAM (FIXME: why?)
	 */
	sctlr = read_sctlr();
	sctlr |= SCTLR_C | SCTLR_Z | SCTLR_I;
	write_sctlr(sctlr);

	if (boot_cpu()) {
		bootblock_cpu_init();
		bootblock_mainboard_init();
	}

	console_init();
	printk(BIOS_INFO, "hello from bootblock\n");
	printk(BIOS_INFO, "bootblock main(): loading romstage\n");
	entry = cbfs_load_stage(CBFS_DEFAULT_MEDIA, stage_name);

	printk(BIOS_INFO, "bootblock main(): jumping to romstage\n");
	if (entry)
		stage_exit(entry);
	hlt();
}
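/*
 * A minimal sketch, for illustration only, of the kind of maintenance
 * armv7_invalidate_caches() has to perform before the caches are
 * re-enabled (instruction cache and branch predictor shown; data-cache
 * invalidation by set/way is omitted). This is not coreboot's actual
 * implementation.
 */
static inline void icache_bp_invalidate_sketch(void)
{
	uint32_t zero = 0;

	/* ICIALLU: invalidate the entire instruction cache */
	asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r"(zero));
	/* BPIALL: invalidate the branch predictor array */
	asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r"(zero));
	/* Ensure completion, then resynchronize the pipeline */
	asm volatile("dsb" : : : "memory");
	asm volatile("isb" : : : "memory");
}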
void arm_mmu_cleanup(void)
{
	u32 sctlr = read_sctlr();

	/* If MMU already disabled then return */
	if (!(sctlr & SCTLR_M_MASK)) {
		return;
	}

	/* Disable MMU */
	sctlr &= ~SCTLR_M_MASK;
	write_sctlr(sctlr);
}
void exception_init(void)
{
	uint32_t sctlr = read_sctlr();

	/* Handle exceptions in ARM mode. */
	sctlr &= ~SCTLR_TE;
	/* Set V=0 in SCTLR so VBAR points to the exception vector table. */
	sctlr &= ~SCTLR_V;
	write_sctlr(sctlr);

	extern uint32_t exception_table[];
	set_vbar((uintptr_t)exception_table);

	exception_stack_end = exception_stack + ARRAY_SIZE(exception_stack);
	exception_state_ptr = &exception_state;
}
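/*
 * The two SCTLR bits cleared above, assuming the ARMv7-A architectural
 * positions (the defining header is not shown here): with TE=0,
 * exceptions are taken in ARM rather than Thumb state; with V=0, the
 * vector base comes from VBAR instead of the fixed high-vectors
 * address.
 */
#define SCTLR_TE	(1U << 30)	/* Thumb exception enable */
#define SCTLR_V		(1U << 13)	/* High vectors enable */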
void vm_enable_paging(void)
{
	u32_t sctlr;

	write_ttbcr(0);

	/* Set all Domains to Client */
	write_dacr(0x55555555);

	sctlr = read_sctlr();

	/* Enable MMU */
	sctlr |= SCTLR_M;

	/* Enable instruction and data cache */
	sctlr |= SCTLR_C;
	sctlr |= SCTLR_I;

	write_sctlr(sctlr);
}
/**
 * This function is called when the OS makes a firmware call with the
 * function code APPF_POWER_DOWN_CPU
 */
static int power_down_cpu(unsigned cstate, unsigned rstate, unsigned flags)
{
	struct appf_cpu *cpu;
	struct appf_cluster *cluster;
	int cpu_index, cluster_index;
	int i, rc, cluster_can_enter_cstate1;
	struct appf_main_table *pmaintable =
		(struct appf_main_table *)reloc_addr((unsigned)&main_table);
#ifdef USE_REALVIEW_EB_RESETS
	int system_reset = FALSE, last_cpu = FALSE;
#endif

	cpu_index = appf_platform_get_cpu_index();
	cluster_index = appf_platform_get_cluster_index();
	cluster = pmaintable->cluster_table;
	cluster += cluster_index;
	dbg_print("cluster:", cluster);
	cpu = cluster->cpu_table;
	cpu += cpu_index;
	dbg_print("cpu:", cpu_index);
	dbg_print("cluster_index:", cluster_index);

	/* Validate arguments */
	if (cstate > 3) {
		return APPF_BAD_CSTATE;
	}
	if (rstate > 3) {
		return APPF_BAD_RSTATE;
	}

	/* If we're just entering standby mode, we don't mark the CPU as inactive */
	if (cstate == 1) {
		get_spinlock(cpu_index, cluster->context->lock);
		cpu->power_state = 1;

		/* See if we can make the cluster standby too */
		if (rstate == 1) {
			cluster_can_enter_cstate1 = TRUE;
			for (i = 0; i < cluster->num_cpus; ++i) {
				if (cluster->cpu_table[i].power_state != 1) {
					cluster_can_enter_cstate1 = FALSE;
					break;
				}
			}
			if (cluster_can_enter_cstate1) {
				cluster->power_state = 1;
			}
		}

		rc = appf_platform_enter_cstate1(cpu_index, cpu, cluster);
		if (rc == 0) {
			release_spinlock(cpu_index, cluster->context->lock);
			dsb();
			wfi();
			get_spinlock(cpu_index, cluster->context->lock);
			rc = appf_platform_leave_cstate1(cpu_index, cpu,
							 cluster);
		}

		cpu->power_state = 0;
		cluster->power_state = 0;
		release_spinlock(cpu_index, cluster->context->lock);
		return rc;
	}

	/*
	 * Ok, we're not just entering standby, so we are going to lose
	 * the context on this CPU
	 */
	dbg_prints("step1\n");
	get_spinlock(cpu_index, cluster->context->lock);
	--cluster->active_cpus;
	dbg_prints("step2\n");
	cpu->power_state = cstate;
	if (cluster->active_cpus == 0) {
		cluster->power_state = rstate;
#ifdef USE_REALVIEW_EB_RESETS
		/* last CPU down must not issue WFI, or we get stuck! */
		last_cpu = TRUE;
		if (rstate > 1) {
			system_reset = TRUE;
		}
#endif
	}

	/* add flags as required by hardware (e.g. APPF_SAVE_L2 if L2 is on) */
	flags |= cpu->context->flags;

	appf_platform_save_context(cluster, cpu, flags);
	dbg_prints("step3\n");

	/* Call the platform-specific shutdown code */
	rc = appf_platform_enter_cstate(cpu_index, cpu, cluster);

	/* Did the power down succeed? */
	if (rc == APPF_OK) {
		release_spinlock(cpu_index, cluster->context->lock);
		while (1) {
#if 0
#if defined(NO_PCU) || defined(USE_REALVIEW_EB_RESETS)
			extern void platform_reset_handler(unsigned, unsigned,
							   unsigned, unsigned);
			void (*reset)(unsigned, unsigned, unsigned, unsigned) =
				platform_reset_handler;

#ifdef USE_REALVIEW_EB_RESETS
			/* Unlock system registers */
			*(volatile unsigned *)0x10000020 = 0xa05f;
			if (system_reset) {
				/* Tell the Realview EB to do a system reset */
				*(volatile unsigned *)0x10000040 = 6;
				/* goto reset vector! */
			} else {
				if (!last_cpu) {
					/* Tell the Realview EB to put this CPU into reset */
					*(volatile unsigned *)0x10000074 &=
						~(1 << (6 + cpu_index));
					/* goto reset vector! (when another CPU takes us out of reset) */
				}
			}
#endif
			/*
			 * If we get here, either we are the last CPU, or the
			 * EB resets aren't present (e.g. Emulator). So, fake
			 * a reset: turn off MMU, corrupt registers, wait for
			 * a while, jump to warm reset entry point.
			 */
			write_sctlr(read_sctlr() & ~0x10001807); /* clear TRE, I, Z, A, C, M */
			dsb();
			for (i = 0; i < 10000; ++i) {
				__nop();
			}
			reset(0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef);
#endif
#endif
			dsb();
			wfi(); /* This signals the power controller to cut the power */
			/* Next stop, reset vector! */
		}
	} else {
		/* Power down failed for some reason, return to the OS */
		appf_platform_restore_context(cluster, cpu);
		cpu->power_state = 0;
		cluster->power_state = 0;
		++cluster->active_cpus;
		release_spinlock(cpu_index, cluster->context->lock);
	}

	return rc;
}
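/*
 * A sketch of the "last man standing" rule power_down_cpu() applies
 * under the context lock: each CPU decrements the cluster's active
 * count, and only the CPU that brings it to zero may program the
 * cluster-wide residency state. The helper is hypothetical; in the
 * function above the lock is held across context save and shutdown,
 * not just this check.
 */
static int cpu_is_last_man(int cpu_index, struct appf_cluster *cluster,
			   unsigned rstate)
{
	int last = FALSE;

	get_spinlock(cpu_index, cluster->context->lock);
	if (--cluster->active_cpus == 0) {
		cluster->power_state = rstate;
		last = TRUE;
	}
	release_spinlock(cpu_index, cluster->context->lock);

	return last;
}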
static int stm32mp1_ddr_setup(void)
{
	struct ddr_info *priv = &ddr_priv_data;
	int ret;
	struct stm32mp1_ddr_config config;
	int node, len;
	uint32_t uret, idx;
	void *fdt;

#define PARAM(x, y)							\
	{								\
		.name = x,						\
		.offset = offsetof(struct stm32mp1_ddr_config, y),	\
		.size = sizeof(config.y) / sizeof(uint32_t)		\
	}

#define CTL_PARAM(x) PARAM("st,ctl-"#x, c_##x)
#define PHY_PARAM(x) PARAM("st,phy-"#x, p_##x)

	const struct {
		const char *name; /* Name in DT */
		const uint32_t offset; /* Offset in config struct */
		const uint32_t size; /* Size of parameters */
	} param[] = {
		CTL_PARAM(reg),
		CTL_PARAM(timing),
		CTL_PARAM(map),
		CTL_PARAM(perf),
		PHY_PARAM(reg),
		PHY_PARAM(timing),
		PHY_PARAM(cal)
	};

	if (fdt_get_address(&fdt) == 0) {
		return -ENOENT;
	}

	node = fdt_node_offset_by_compatible(fdt, -1, DT_DDR_COMPAT);
	if (node < 0) {
		ERROR("%s: Cannot read DDR node in DT\n", __func__);
		return -EINVAL;
	}

	config.info.speed = fdt_read_uint32_default(node, "st,mem-speed", 0);
	if (!config.info.speed) {
		VERBOSE("%s: no st,mem-speed\n", __func__);
		return -EINVAL;
	}
	config.info.size = fdt_read_uint32_default(node, "st,mem-size", 0);
	if (!config.info.size) {
		VERBOSE("%s: no st,mem-size\n", __func__);
		return -EINVAL;
	}
	config.info.name = fdt_getprop(fdt, node, "st,mem-name", &len);
	if (config.info.name == NULL) {
		VERBOSE("%s: no st,mem-name\n", __func__);
		return -EINVAL;
	}
	INFO("RAM: %s\n", config.info.name);

	for (idx = 0; idx < ARRAY_SIZE(param); idx++) {
		ret = fdt_read_uint32_array(node, param[idx].name,
					    (void *)((uintptr_t)&config +
						     param[idx].offset),
					    param[idx].size);
		VERBOSE("%s: %s[0x%x] = %d\n", __func__,
			param[idx].name, param[idx].size, ret);
		if (ret != 0) {
			ERROR("%s: Cannot read %s\n",
			      __func__, param[idx].name);
			return -EINVAL;
		}
	}

	/* Disable axidcg clock gating during init */
	mmio_clrbits_32(priv->rcc + RCC_DDRITFCR, RCC_DDRITFCR_AXIDCGEN);

	stm32mp1_ddr_init(priv, &config);

	/* Enable axidcg clock gating */
	mmio_setbits_32(priv->rcc + RCC_DDRITFCR, RCC_DDRITFCR_AXIDCGEN);

	priv->info.size = config.info.size;

	VERBOSE("%s : ram size(%x, %x)\n", __func__,
		(uint32_t)priv->info.base, (uint32_t)priv->info.size);

	write_sctlr(read_sctlr() & ~SCTLR_C_BIT);
	dcsw_op_all(DC_OP_CISW);

	uret = ddr_test_data_bus();
	if (uret != 0U) {
		ERROR("DDR data bus test: can't access memory @ 0x%x\n",
		      uret);
		panic();
	}

	uret = ddr_test_addr_bus();
	if (uret != 0U) {
		ERROR("DDR addr bus test: can't access memory @ 0x%x\n",
		      uret);
		panic();
	}

	uret = ddr_check_size();
	if (uret < config.info.size) {
		ERROR("DDR size: 0x%x does not match DT config: 0x%x\n",
		      uret, config.info.size);
		panic();
	}

	write_sctlr(read_sctlr() | SCTLR_C_BIT);

	return 0;
}
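/*
 * A sketch of the classic walking-ones check a routine like
 * ddr_test_data_bus() typically performs: write each single-bit
 * pattern to one DDR word and read it back, with the data cache off
 * (as arranged above) so accesses really reach DRAM. STM32MP_DDR_BASE
 * and the failure-reporting convention are assumptions for
 * illustration.
 */
static uint32_t ddr_data_bus_test_sketch(void)
{
	volatile uint32_t *addr = (volatile uint32_t *)STM32MP_DDR_BASE;
	uint32_t pattern;

	for (pattern = 1U; pattern != 0U; pattern <<= 1) {
		*addr = pattern;
		if (*addr != pattern) {
			/* Report the failing address, as the caller expects */
			return (uint32_t)(uintptr_t)addr;
		}
	}

	return 0U; /* all 32 data lines toggled correctly */
}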
void arm_mmu_setup(void)
{
	u32 s, sec, sec_tmpl = 0x0, sec_start = 0x0, sec_end = 0x0;
	u32 sctlr = read_sctlr();

	/* If MMU already enabled then return */
	if (sctlr & SCTLR_M_MASK) {
		return;
	}

	/* Reset memory for L2 */
	for (sec = 0; sec < (TTBL_L2TBL_SIZE / 4); sec++) {
		l2[sec] = 0x0;
	}

	/* Reset memory for L1 */
	for (sec = 0; sec < (TTBL_L1TBL_SIZE / 4); sec++) {
		l1[sec] = 0x0;
	}

	/* Section entry template for code */
	sec_tmpl = 0x0;
	sec_tmpl |= (TTBL_L1TBL_TTE_DOM_CHECKAP << TTBL_L1TBL_TTE_DOM_SHIFT);
	sec_tmpl |= (TTBL_AP_SRW_URW << TTBL_L1TBL_TTE_AP_SHIFT);
	sec_tmpl |= TTBL_L1TBL_TTE_C_MASK;
	sec_tmpl |= TTBL_L1TBL_TTE_TYPE_SECTION;

	/* Create section entries for code */
	sec_start = ((u32)&_code_start) & ~(TTBL_L1TBL_SECTION_PAGE_SIZE - 1);
	sec_end = ((u32)&_code_end) & ~(TTBL_L1TBL_SECTION_PAGE_SIZE - 1);
	for (sec = sec_start;
	     sec <= sec_end;
	     sec += TTBL_L1TBL_SECTION_PAGE_SIZE) {
		l1[sec / TTBL_L1TBL_SECTION_PAGE_SIZE] = sec_tmpl | sec;
	}
	sec_end += TTBL_L1TBL_SECTION_PAGE_SIZE;

	/* Create section entries for exception vectors */
	if (sec_start > 0x0) {
		l1[0] = sec_tmpl | 0x0;
	}

	/* Map an additional section after code */
	sec = sec_end;
	l1[sec / TTBL_L1TBL_SECTION_PAGE_SIZE] = sec_tmpl | sec;
	sec_end += TTBL_L1TBL_SECTION_PAGE_SIZE;

	/* Section entry template for I/O */
	sec_tmpl &= ~TTBL_L1TBL_TTE_C_MASK;
	sec_tmpl |= TTBL_L1TBL_TTE_XN_MASK;

	/* Create section entries for I/O */
	for (s = 0; s < arm_board_iosection_count(); s++) {
		sec = arm_board_iosection_addr(s) &
		      ~(TTBL_L1TBL_SECTION_PAGE_SIZE - 1);
		l1[sec / TTBL_L1TBL_SECTION_PAGE_SIZE] = sec_tmpl | sec;
	}

	/* Map an L2 table after (code + additional section) */
	sec_tmpl = 0x0;
	sec_tmpl |= TTBL_L1TBL_TTE_TYPE_L2TBL;
	l2_mapva = sec_end;
	l1[l2_mapva / TTBL_L1TBL_SECTION_PAGE_SIZE] = sec_tmpl | (u32)(&l2);

	/* Setup test area in physical RAM */
	test_area_pa = sec_end;
	test_area_size = TTBL_L1TBL_SECTION_PAGE_SIZE;

	/* Write DACR */
	sec = 0x0;
	sec |= (TTBL_DOM_CLIENT << (2 * TTBL_L1TBL_TTE_DOM_CHECKAP));
	sec |= (TTBL_DOM_MANAGER << (2 * TTBL_L1TBL_TTE_DOM_BYPASSAP));
	sec |= (TTBL_DOM_NOACCESS << (2 * TTBL_L1TBL_TTE_DOM_NOACCESS));
	write_dacr(sec);

	/* Write TTBR0 */
	write_ttbr0((u32)&l1);

	/* Enable MMU */
	sctlr |= SCTLR_M_MASK;
	write_sctlr(sctlr);
}
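/*
 * A minimal sketch of the VA-to-L1-index arithmetic arm_mmu_setup()
 * repeats inline: with the short-descriptor format, one L1 entry
 * covers a 1 MiB section, so the table index is va divided by
 * TTBL_L1TBL_SECTION_PAGE_SIZE (i.e. va >> 20). The helper name is
 * hypothetical.
 */
static void map_section(u32 *l1_tbl, u32 va, u32 pa, u32 sec_tmpl)
{
	/* Section mappings require 1 MiB aligned addresses */
	va &= ~(TTBL_L1TBL_SECTION_PAGE_SIZE - 1);
	pa &= ~(TTBL_L1TBL_SECTION_PAGE_SIZE - 1);

	l1_tbl[va / TTBL_L1TBL_SECTION_PAGE_SIZE] = sec_tmpl | pa;
}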