/*
 * Add every entry of a zero-terminated region table to the memory map.
 * The table ends with a sentinel entry whose size is 0.
 */
void mmap_add(const mmap_region_t *mm)
{
	for (; mm->size != 0; mm++) {
		mmap_add_region(mm->base, mm->size, mm->attr);
	}
}
/*
 * Map [baseaddr, baseaddr + size) as non-secure, non-executable RW memory,
 * expanding the range outward to page granularity: the base is rounded down
 * to a page boundary and the length is rounded up so the whole request is
 * still covered.
 */
static void expand_and_mmap(uintptr_t baseaddr, size_t size)
{
	uintptr_t base = round_down(baseaddr, PAGE_SIZE);
	size_t span = round_up((baseaddr - base) + size, PAGE_SIZE);

	mmap_add_region(base, base, span,
			MT_MEMORY | MT_RW | MT_NS | MT_EXECUTE_NEVER);
}
void bl31_plat_arch_setup(void) { mmap_add_region(BL31_BASE, BL31_BASE, (BL31_LIMIT - BL31_BASE), MT_MEMORY | MT_RW | MT_SECURE); mmap_add_region(BL_CODE_BASE, BL_CODE_BASE, (BL_CODE_END - BL_CODE_BASE), MT_MEMORY | MT_RO | MT_SECURE); #if USE_COHERENT_MEM mmap_add_region(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_BASE, (BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE), MT_DEVICE | MT_RW | MT_SECURE); #endif mmap_add(imx_mmap); init_xlat_tables(); enable_mmu_el3(0); }
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 * Also initializes the MTK proprietary log buffer control fields.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
	/* Enable non-secure access to CCI-400 registers */
	mmio_write_32(CCI400_BASE + CCI_SEC_ACCESS_OFFSET, 0x1);

	plat_cci_init();
	plat_cci_enable();

	/*
	 * Map the ATF log buffer handed over by the loader (in gteearg),
	 * forcing the base down to a 2MB boundary and mapping one 2MB
	 * block as non-secure device memory.
	 */
	if (gteearg.atf_log_buf_size != 0) {
		INFO("mmap atf buffer : 0x%x, 0x%x\n\r",
			gteearg.atf_log_buf_start,
			gteearg.atf_log_buf_size);
		mmap_add_region(
			gteearg.atf_log_buf_start & ~(PAGE_SIZE_2MB_MASK),
			gteearg.atf_log_buf_start & ~(PAGE_SIZE_2MB_MASK),
			PAGE_SIZE_2MB,
			MT_DEVICE | MT_RW | MT_NS);
		INFO("mmap atf buffer (force 2MB aligned):0x%x, 0x%x\n",
			(gteearg.atf_log_buf_start & ~(PAGE_SIZE_2MB_MASK)),
			PAGE_SIZE_2MB);
	}

	/*
	 * add TZRAM_BASE to memory map
	 * then set RO and COHERENT to different attribute
	 */
	plat_configure_mmu_el3(
		(TZRAM_BASE & ~(PAGE_SIZE_MASK)),
		(TZRAM_SIZE & ~(PAGE_SIZE_MASK)),
		(BL31_RO_BASE & ~(PAGE_SIZE_MASK)),
		BL31_RO_LIMIT,
		BL31_COHERENT_RAM_BASE,
		BL31_COHERENT_RAM_LIMIT);

	/* Initialize for ATF log buffer */
	if (gteearg.atf_log_buf_size != 0) {
		/* Reserve the tail of the log buffer for AEE debug records. */
		gteearg.atf_aee_debug_buf_size = ATF_AEE_BUFFER_SIZE;
		gteearg.atf_aee_debug_buf_start =
			gteearg.atf_log_buf_start +
			gteearg.atf_log_buf_size - ATF_AEE_BUFFER_SIZE;
		INFO("ATF log service is registered (0x%x, aee:0x%x)\n",
			gteearg.atf_log_buf_start,
			gteearg.atf_aee_debug_buf_start);
	} else {
		/* No log buffer: clear the AEE debug fields. */
		gteearg.atf_aee_debug_buf_size = 0;
		gteearg.atf_aee_debug_buf_start = 0;
	}

	/* Platform code before bl31_main */
	/* compatible to the earlier chipset */
	/* Show to ATF log buffer & UART */
	INFO("BL3-1: %s\n", version_string);
	INFO("BL3-1: %s\n", build_message);
}
/*
 * Build the SVC monitor memory map from the caller-supplied image layout
 * (identity mappings), add the Rockchip static regions, run the SoC hook,
 * then enable the MMU.
 */
void plat_configure_mmu_svc_mon(unsigned long total_base,
				unsigned long total_size,
				unsigned long ro_start,
				unsigned long ro_limit,
				unsigned long coh_start,
				unsigned long coh_limit)
{
	/* Whole image as secure RW memory. */
	mmap_add_region(total_base, total_base, total_size,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Code/rodata window remapped read-only. */
	mmap_add_region(ro_start, ro_start, ro_limit - ro_start,
			MT_MEMORY | MT_RO | MT_SECURE);

	/* Coherent RAM mapped with device attributes. */
	mmap_add_region(coh_start, coh_start, coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);

	/* Platform static regions plus SoC-specific setup. */
	mmap_add(plat_rk_mmap);
	rockchip_plat_mmu_svc_mon();

	init_xlat_tables();
	enable_mmu_svc_mon(0);
}
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void bl31_plat_arch_setup()
{
#if RESET_TO_BL31
	mt_cci_setup();
#endif
	/* Enable non-secure access to CCI-400 registers */
	mmio_write_32(CCI400_BASE + CCI_SEC_ACCESS_OFFSET , 0x1);

	/* set secondary CPUs to AArch64 */
	printf("###@@@ MP0_MISC_CONFIG3:0x%08x @@@###\n",
		mmio_read_32(MP0_MISC_CONFIG3));
	mmio_write_32(MP0_MISC_CONFIG3,
		mmio_read_32(MP0_MISC_CONFIG3) | 0x0000E000);
	printf("###@@@ MP0_MISC_CONFIG3:0x%08x @@@###\n",
		mmio_read_32(MP0_MISC_CONFIG3));

	{
		/* Boot info block placed by the loader at a fixed address. */
		atf_arg_t_ptr teearg =
			(atf_arg_t_ptr)(uintptr_t)TEE_BOOT_INFO_ADDR;

		/*
		 * Map the loader-provided ATF log buffer: base forced down
		 * to a 2MB boundary, one 2MB block, non-secure device memory.
		 * NOTE(review): this uses the older 3-argument
		 * mmap_add_region(base, size, attr) xlat API.
		 */
		if (teearg->atf_log_buf_size != 0) {
			printf("mmap atf buffer : 0x%x, 0x%x\n\r",
				teearg->atf_log_buf_start,
				teearg->atf_log_buf_size);
			mmap_add_region(
				(teearg->atf_log_buf_start & ~(PAGE_SIZE_2MB_MASK)),
				PAGE_SIZE_2MB,
				MT_DEVICE | MT_RW | MT_NS);
			printf("mmap atf buffer (force 2MB aligned): 0x%x, 0x%x\n\r",
				(teearg->atf_log_buf_start & ~(PAGE_SIZE_2MB_MASK)),
				PAGE_SIZE_2MB);
		}
	}

	/* add TZRAM2_BASE to memory map (size rounded up to a full page) */
	mmap_add_region(TZRAM2_BASE,
		((TZRAM2_SIZE & ~(PAGE_SIZE_MASK)) + PAGE_SIZE),
		MT_MEMORY | MT_RW | MT_SECURE);

	/*
	 * add TZRAM_BASE to memory map
	 * then set RO and COHERENT to different attribute
	 */
	mt_configure_mmu_el3(TZRAM_BASE,
		((TZRAM_SIZE & ~(PAGE_SIZE_MASK)) + PAGE_SIZE),
		BL31_RO_BASE, BL31_RO_LIMIT,
		BL31_COHERENT_RAM_BASE, BL31_COHERENT_RAM_LIMIT);

	/*
	 * Without this, access to CPUECTRL from NS EL1
	 * will cause trap into EL3
	 */
	enable_ns_access_to_cpuectlr();
}
void bl31_plat_arch_setup(void) { mmap_add_region(BL31_RO_START, BL31_RO_START, (BL31_RO_END - BL31_RO_START), MT_MEMORY | MT_RO | MT_SECURE); mmap_add_region(BL31_RW_START, BL31_RW_START, (BL31_RW_END - BL31_RW_START), MT_MEMORY | MT_RW | MT_SECURE); mmap_add(imx_mmap); #if USE_COHERENT_MEM mmap_add_region(BL31_COHERENT_RAM_START, BL31_COHERENT_RAM_START, BL31_COHERENT_RAM_END - BL31_COHERENT_RAM_START, MT_DEVICE | MT_RW | MT_SECURE); #endif /* setup xlat table */ init_xlat_tables(); /* enable the MMU */ enable_mmu_el3(0); }
/******************************************************************************* * Perform the very early platform specific architectural setup here. At the * moment this only intializes the mmu in a quick and dirty way. ******************************************************************************/ void bl31_plat_arch_setup(void) { unsigned long bl31_base_pa = tegra_bl31_phys_base; unsigned long total_base = bl31_base_pa; unsigned long total_size = BL32_BASE - BL31_RO_BASE; unsigned long ro_start = bl31_base_pa; unsigned long ro_size = BL31_RO_LIMIT - BL31_RO_BASE; const mmap_region_t *plat_mmio_map = NULL; #if USE_COHERENT_MEM unsigned long coh_start, coh_size; #endif /* add memory regions */ mmap_add_region(total_base, total_base, total_size, MT_MEMORY | MT_RW | MT_SECURE); mmap_add_region(ro_start, ro_start, ro_size, MT_MEMORY | MT_RO | MT_SECURE); #if USE_COHERENT_MEM coh_start = total_base + (BL31_COHERENT_RAM_BASE - BL31_RO_BASE); coh_size = BL31_COHERENT_RAM_LIMIT - BL31_COHERENT_RAM_BASE; mmap_add_region(coh_start, coh_start, coh_size, MT_DEVICE | MT_RW | MT_SECURE); #endif /* add MMIO space */ plat_mmio_map = plat_get_mmio_map(); if (plat_mmio_map) mmap_add(plat_mmio_map); else WARN("MMIO map not available\n"); /* set up translation tables */ init_xlat_tables(); /* enable the MMU */ enable_mmu_el3(0); }
/*
 * Map the SRAM text and data sections (identity-mapped) when the platform
 * uses the extra linker script that places them there. No-op otherwise.
 */
void rockchip_plat_sram_mmu_el3(void)
{
#ifdef PLAT_EXTRA_LD_SCRIPT
	size_t text_size, data_size;

	/* sram.text: read-only, secure. */
	text_size = (char *)&__bl31_sram_text_end -
		    (char *)&__bl31_sram_text_start;
	mmap_add_region((unsigned long)&__bl31_sram_text_start,
			(unsigned long)&__bl31_sram_text_start,
			text_size, MT_MEMORY | MT_RO | MT_SECURE);

	/* sram.data: read-write, secure. */
	data_size = (char *)&__bl31_sram_data_end -
		    (char *)&__bl31_sram_data_start;
	mmap_add_region((unsigned long)&__bl31_sram_data_start,
			(unsigned long)&__bl31_sram_data_start,
			data_size, MT_MEMORY | MT_RW | MT_SECURE);
#else
	/* TODO: Support other SoCs, Just support RK3399 now */
	return;
#endif
}
/******************************************************************************* * Perform the very early platform specific architectural setup here. At the * moment this only intializes the mmu in a quick and dirty way. ******************************************************************************/ void bl31_plat_arch_setup(void) { uint64_t rw_start = BL31_RW_START; uint64_t rw_size = BL31_RW_END - BL31_RW_START; uint64_t rodata_start = BL31_RODATA_BASE; uint64_t rodata_size = BL31_RODATA_END - BL31_RODATA_BASE; uint64_t code_base = TEXT_START; uint64_t code_size = TEXT_END - TEXT_START; const mmap_region_t *plat_mmio_map = NULL; #if USE_COHERENT_MEM uint32_t coh_start, coh_size; #endif const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); /* * Add timestamp for arch setup entry. */ boot_profiler_add_record("[TF] arch setup entry"); /* add memory regions */ mmap_add_region(rw_start, rw_start, rw_size, MT_MEMORY | MT_RW | MT_SECURE); mmap_add_region(rodata_start, rodata_start, rodata_size, MT_RO_DATA | MT_SECURE); mmap_add_region(code_base, code_base, code_size, MT_CODE | MT_SECURE); /* map TZDRAM used by BL31 as coherent memory */ if (TEGRA_TZRAM_BASE == tegra_bl31_phys_base) { mmap_add_region(params_from_bl2->tzdram_base, params_from_bl2->tzdram_base, BL31_SIZE, MT_DEVICE | MT_RW | MT_SECURE); } #if USE_COHERENT_MEM coh_start = total_base + (BL_COHERENT_RAM_BASE - BL31_RO_BASE); coh_size = BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE; mmap_add_region(coh_start, coh_start, coh_size, (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE); #endif /* map on-chip free running uS timer */ mmap_add_region(page_align(TEGRA_TMRUS_BASE, 0), page_align(TEGRA_TMRUS_BASE, 0), TEGRA_TMRUS_SIZE, (uint8_t)MT_DEVICE | (uint8_t)MT_RO | (uint8_t)MT_SECURE); /* add MMIO space */ plat_mmio_map = plat_get_mmio_map(); if (plat_mmio_map != NULL) { mmap_add(plat_mmio_map); } else { WARN("MMIO map not available\n"); } /* set up translation tables */ init_xlat_tables(); 
/* enable the MMU */ enable_mmu_el3(0); /* * Add timestamp for arch setup exit. */ boot_profiler_add_record("[TF] arch setup exit"); INFO("BL3-1: Tegra: MMU enabled\n"); }