/*
 * Program the Video Memory carveout region
 *
 * phys_base = physical base of aperture
 * size_in_bytes = size of aperture in bytes
 */
void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes)
{
        uintptr_t vmem_end_old = video_mem_base + (video_mem_size << 20);
        uintptr_t vmem_end_new = phys_base + size_in_bytes;
        uint32_t regval;

        /*
         * The GPU is the user of the Video Memory region. In order to
         * transition to the new memory region smoothly, we program the
         * new base/size ONLY if the GPU is in reset mode.
         */
        regval = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_GPU_RESET_REG_OFFSET);
        if ((regval & GPU_RESET_BIT) == 0) {
                ERROR("GPU not in reset! Video Memory setup failed\n");
                return;
        }

        /*
         * Setup the Memory controller to restrict CPU accesses to the Video
         * Memory region
         */
        INFO("Configuring Video Memory Carveout\n");

        /*
         * Configure Memory Controller directly for the first time.
         */
        if (video_mem_base == 0)
                goto done;

        /*
         * Clear the old regions now being exposed. The following cases
         * can occur -
         *
         * 1. clear whole old region (no overlap with new region)
         * 2. clear old sub-region below new base
         * 3. clear old sub-region above new end
         */
        INFO("Cleaning previous Video Memory Carveout\n");

        disable_mmu_el3();
        if (phys_base > vmem_end_old || video_mem_base > vmem_end_new)
                zeromem16((void *)video_mem_base, video_mem_size << 20);
        else if (video_mem_base < phys_base)
                zeromem16((void *)video_mem_base, phys_base - video_mem_base);
        else if (vmem_end_old > vmem_end_new)
                zeromem16((void *)vmem_end_new, vmem_end_old - vmem_end_new);
        enable_mmu_el3(0);

done:
        tegra_mc_write_32(MC_VIDEO_PROTECT_BASE, phys_base);
        tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes >> 20);

        /* store new values */
        video_mem_base = phys_base;
        video_mem_size = size_in_bytes >> 20;
}
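/*
 * The three clearing cases map directly onto the if/else chain above.
 * A minimal, self-contained sketch (standalone C, made-up example
 * addresses, not part of the platform port) that mirrors the same
 * arithmetic and prints which sub-region would be scrubbed:
 */
#include <stdint.h>
#include <stdio.h>

static void show_clear(uint64_t old_base, uint64_t old_size,
                       uint64_t new_base, uint64_t new_size)
{
        uint64_t old_end = old_base + old_size;
        uint64_t new_end = new_base + new_size;

        if (new_base > old_end || old_base > new_end)
                /* case 1: no overlap - scrub the whole old region */
                printf("clear [%#llx, %#llx)\n",
                       (unsigned long long)old_base, (unsigned long long)old_end);
        else if (old_base < new_base)
                /* case 2: old region starts below the new base */
                printf("clear [%#llx, %#llx)\n",
                       (unsigned long long)old_base, (unsigned long long)new_base);
        else if (old_end > new_end)
                /* case 3: old region extends above the new end */
                printf("clear [%#llx, %#llx)\n",
                       (unsigned long long)new_end, (unsigned long long)old_end);
        else
                printf("nothing to clear\n");
}

int main(void)
{
        show_clear(0x80000000, 0x10000000, 0xa0000000, 0x10000000); /* case 1 */
        show_clear(0x80000000, 0x10000000, 0x88000000, 0x10000000); /* case 2 */
        show_clear(0x88000000, 0x10000000, 0x80000000, 0x10000000); /* case 3 */
        return 0;
}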
void bl2_el3_plat_arch_setup(void)
{
        unsigned int soc;
        int skip_scp = 0;
        int ret;

        uniphier_mmap_setup(BL2_BASE, BL2_SIZE, uniphier_bl2_mmap);
        enable_mmu_el3(0);

        soc = uniphier_get_soc_id();
        if (soc == UNIPHIER_SOC_UNKNOWN) {
                ERROR("unsupported SoC\n");
                plat_error_handler(-ENOTSUP);
        }

        ret = uniphier_io_setup(soc);
        if (ret) {
                ERROR("failed to setup io devices\n");
                plat_error_handler(ret);
        }

        switch (uniphier_get_boot_master(soc)) {
        case UNIPHIER_BOOT_MASTER_THIS:
                INFO("Booting from this SoC\n");
                skip_scp = 1;
                break;
        case UNIPHIER_BOOT_MASTER_SCP:
                INFO("Booting from on-chip SCP\n");
                if (uniphier_scp_is_running()) {
                        INFO("SCP is already running. SCP_BL2 load will be skipped.\n");
                        skip_scp = 1;
                }

                /*
                 * SCP must be kicked every time even if it is already running
                 * because it polls this event after the reboot of the backend.
                 */
                uniphier_bl2_kick_scp = 1;
                break;
        case UNIPHIER_BOOT_MASTER_EXT:
                INFO("Booting from external SCP\n");
                skip_scp = 1;
                break;
        default:
                plat_error_handler(-ENOTSUP);
                break;
        }

        if (skip_scp) {
                struct image_info *image_info;

                image_info = uniphier_get_image_info(SCP_BL2_IMAGE_ID);
                image_info->h.attr |= IMAGE_ATTRIB_SKIP_LOADING;
        }
}
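/*
 * uniphier_bl2_kick_scp is only set above; a later BL2 hook has to consume
 * it. A hypothetical sketch of one such consumer, assuming the standard
 * post-image-load hook and a uniphier_scp_start() mailbox helper (both the
 * hook wiring and the helper name are assumptions made for illustration,
 * not taken from the code above).
 */
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
        if (image_id == SCP_BL2_IMAGE_ID && uniphier_bl2_kick_scp)
                uniphier_scp_start();   /* assumed helper: release the SCP */

        return 0;
}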
/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void arm_bl31_plat_arch_setup(void)
{
        arm_setup_page_tables(BL31_BASE,
                              BL31_END - BL31_BASE,
                              BL_CODE_BASE,
                              BL_CODE_LIMIT,
                              BL_RO_DATA_BASE,
                              BL_RO_DATA_LIMIT
#if USE_COHERENT_MEM
                              ,
                              BL31_COHERENT_RAM_BASE,
                              BL31_COHERENT_RAM_LIMIT
#endif
                              );
        enable_mmu_el3(0);
}
/*
 * Perform the very early platform specific architectural setup here.
 */
void bl31_plat_arch_setup(void)
{
        plat_arm_interconnect_init();
        plat_arm_interconnect_enter_coherency();

        arm_setup_page_tables(BL31_BASE,
                              BL31_END - BL31_BASE,
                              BL_CODE_BASE,
                              BL_CODE_END,
                              BL_RO_DATA_BASE,
                              BL_RO_DATA_END,
                              BL_COHERENT_RAM_BASE,
                              BL_COHERENT_RAM_END);

        enable_mmu_el3(0);
}
void bl31_plat_arch_setup(void)
{
        const mmap_region_t bl_regions[] = {
                MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE,
                                MT_MEMORY | MT_RW | MT_SECURE),
                MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
                                MT_CODE | MT_SECURE),
                /* region size is end minus base, not end minus itself */
                MAP_REGION_FLAT(BL_RO_DATA_BASE, BL_RO_DATA_END - BL_RO_DATA_BASE,
                                MT_RO_DATA | MT_SECURE),
                {0}
        };

        setup_page_tables(bl_regions, plat_k3_mmap);
        enable_mmu_el3(0);
}
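/*
 * setup_page_tables() above hides the same sequence that the later
 * snippets spell out by hand: add the BL image regions, add the platform
 * MMIO map, then build the translation tables. A minimal sketch of such a
 * helper, assuming it does nothing beyond merging the two region lists
 * (illustrative; not presented as the verified common-code implementation):
 */
static void sketch_setup_page_tables(const mmap_region_t *bl_regions,
                                     const mmap_region_t *plat_regions)
{
        /* image regions first, so they take the static mmap slots */
        mmap_add(bl_regions);

        /* platform peripherals (UART, GIC, ...) */
        mmap_add(plat_regions);

        /* populate the translation tables from the accumulated regions */
        init_xlat_tables();
}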
void bl31_plat_arch_setup(void)
{
        mmap_add_region(BL31_BASE, BL31_BASE, (BL31_LIMIT - BL31_BASE),
                        MT_MEMORY | MT_RW | MT_SECURE);
        mmap_add_region(BL_CODE_BASE, BL_CODE_BASE,
                        (BL_CODE_END - BL_CODE_BASE),
                        MT_MEMORY | MT_RO | MT_SECURE);
#if USE_COHERENT_MEM
        mmap_add_region(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_BASE,
                        (BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE),
                        MT_DEVICE | MT_RW | MT_SECURE);
#endif
        mmap_add(imx_mmap);

        init_xlat_tables();

        enable_mmu_el3(0);
}
void bl31_plat_arch_setup(void)
{
        mmap_add_region(BL31_RO_START, BL31_RO_START,
                        (BL31_RO_END - BL31_RO_START),
                        MT_MEMORY | MT_RO | MT_SECURE);
        mmap_add_region(BL31_RW_START, BL31_RW_START,
                        (BL31_RW_END - BL31_RW_START),
                        MT_MEMORY | MT_RW | MT_SECURE);

        mmap_add(imx_mmap);

#if USE_COHERENT_MEM
        mmap_add_region(BL31_COHERENT_RAM_START, BL31_COHERENT_RAM_START,
                        BL31_COHERENT_RAM_END - BL31_COHERENT_RAM_START,
                        MT_DEVICE | MT_RW | MT_SECURE);
#endif
        /* setup xlat table */
        init_xlat_tables();

        /* enable the MMU */
        enable_mmu_el3(0);
}
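/*
 * Both i.MX variants above pull in a board-level imx_mmap table that is
 * defined elsewhere. A hypothetical illustration of what such a table
 * typically looks like in TF-A ports: flat device mappings terminated by
 * an empty entry. The EXAMPLE_* macros are placeholders, not the real
 * i.MX register map.
 */
static const mmap_region_t example_imx_mmap[] = {
        MAP_REGION_FLAT(EXAMPLE_UART_BASE, EXAMPLE_UART_SIZE,  /* boot console */
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(EXAMPLE_GIC_BASE, EXAMPLE_GIC_SIZE,    /* interrupt controller */
                        MT_DEVICE | MT_RW | MT_SECURE),
        {0}     /* terminator expected by mmap_add() */
};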
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
        unsigned long bl31_base_pa = tegra_bl31_phys_base;
        unsigned long total_base = bl31_base_pa;
        unsigned long total_size = BL32_BASE - BL31_RO_BASE;
        unsigned long ro_start = bl31_base_pa;
        unsigned long ro_size = BL31_RO_LIMIT - BL31_RO_BASE;
        const mmap_region_t *plat_mmio_map = NULL;
#if USE_COHERENT_MEM
        unsigned long coh_start, coh_size;
#endif

        /* add memory regions */
        mmap_add_region(total_base, total_base,
                        total_size,
                        MT_MEMORY | MT_RW | MT_SECURE);
        mmap_add_region(ro_start, ro_start,
                        ro_size,
                        MT_MEMORY | MT_RO | MT_SECURE);

#if USE_COHERENT_MEM
        coh_start = total_base + (BL31_COHERENT_RAM_BASE - BL31_RO_BASE);
        coh_size = BL31_COHERENT_RAM_LIMIT - BL31_COHERENT_RAM_BASE;

        mmap_add_region(coh_start, coh_start,
                        coh_size,
                        MT_DEVICE | MT_RW | MT_SECURE);
#endif

        /* add MMIO space */
        plat_mmio_map = plat_get_mmio_map();
        if (plat_mmio_map)
                mmap_add(plat_mmio_map);
        else
                WARN("MMIO map not available\n");

        /* set up translation tables */
        init_xlat_tables();

        /* enable the MMU */
        enable_mmu_el3(0);
}
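/*
 * The coh_start arithmetic above translates a link-time address into a
 * runtime physical address: the coherent RAM keeps its offset from
 * BL31_RO_BASE, but the whole image may have been loaded at a different
 * physical base (tegra_bl31_phys_base). A self-contained worked example
 * with made-up numbers, illustration only:
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint64_t link_ro_base  = 0x40000000;    /* BL31_RO_BASE at link time */
        uint64_t link_coh_base = 0x40060000;    /* coherent RAM at link time */
        uint64_t load_base     = 0x8c000000;    /* where BL2 actually loaded BL31 */

        /* same formula as coh_start above */
        uint64_t coh_start = load_base + (link_coh_base - link_ro_base);

        assert(coh_start == 0x8c060000);        /* offset 0x60000 is preserved */
        return 0;
}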
void bl31_plat_arch_setup(void)
{
        static const mmap_region_t secure_partition_mmap[] = {
#if ENABLE_SPM && SPM_MM
                MAP_REGION_FLAT(PLAT_SPM_BUF_BASE,
                                PLAT_SPM_BUF_SIZE,
                                MT_RW_DATA | MT_SECURE),
                MAP_REGION_FLAT(PLAT_SQ_SP_PRIV_BASE,
                                PLAT_SQ_SP_PRIV_SIZE,
                                MT_RW_DATA | MT_SECURE),
#endif
                {0},
        };

        sq_mmap_setup(BL31_BASE, BL31_SIZE, secure_partition_mmap);
        enable_mmu_el3(XLAT_TABLE_NC);

#if ENABLE_SPM && SPM_MM
        memcpy((void *)SPM_SHIM_EXCEPTIONS_START,
               (void *)SPM_SHIM_EXCEPTIONS_LMA,
               (uintptr_t)SPM_SHIM_EXCEPTIONS_END -
               (uintptr_t)SPM_SHIM_EXCEPTIONS_START);
#endif
}
void bl1_plat_arch_setup(void)
{
        uniphier_mmap_setup(UNIPHIER_SEC_DRAM_BASE, UNIPHIER_SEC_DRAM_SIZE,
                            NULL);
        enable_mmu_el3(0);
}
void bl31_plat_enable_mmu(uint32_t flags)
{
        enable_mmu_el3(flags);
}
void bl31_plat_enable_mmu(uint32_t flags)
{
        enable_mmu_el3(flags | XLAT_TABLE_NC);
}
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
        uint64_t rw_start = BL31_RW_START;
        uint64_t rw_size = BL31_RW_END - BL31_RW_START;
        uint64_t rodata_start = BL31_RODATA_BASE;
        uint64_t rodata_size = BL31_RODATA_END - BL31_RODATA_BASE;
        uint64_t code_base = TEXT_START;
        uint64_t code_size = TEXT_END - TEXT_START;
        const mmap_region_t *plat_mmio_map = NULL;
#if USE_COHERENT_MEM
        uint32_t coh_start, coh_size;
#endif
        const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();

        /*
         * Add timestamp for arch setup entry.
         */
        boot_profiler_add_record("[TF] arch setup entry");

        /* add memory regions */
        mmap_add_region(rw_start, rw_start,
                        rw_size,
                        MT_MEMORY | MT_RW | MT_SECURE);
        mmap_add_region(rodata_start, rodata_start,
                        rodata_size,
                        MT_RO_DATA | MT_SECURE);
        mmap_add_region(code_base, code_base,
                        code_size,
                        MT_CODE | MT_SECURE);

        /* map TZDRAM used by BL31 as coherent memory */
        if (TEGRA_TZRAM_BASE == tegra_bl31_phys_base) {
                mmap_add_region(params_from_bl2->tzdram_base,
                                params_from_bl2->tzdram_base,
                                BL31_SIZE,
                                MT_DEVICE | MT_RW | MT_SECURE);
        }

#if USE_COHERENT_MEM
        /* coherent RAM keeps its link-time offset from the BL31 load base */
        coh_start = tegra_bl31_phys_base +
                    (BL_COHERENT_RAM_BASE - BL31_RO_BASE);
        coh_size = BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE;

        mmap_add_region(coh_start, coh_start,
                        coh_size,
                        (uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE);
#endif

        /* map on-chip free running uS timer */
        mmap_add_region(page_align(TEGRA_TMRUS_BASE, 0),
                        page_align(TEGRA_TMRUS_BASE, 0),
                        TEGRA_TMRUS_SIZE,
                        (uint8_t)MT_DEVICE | (uint8_t)MT_RO | (uint8_t)MT_SECURE);

        /* add MMIO space */
        plat_mmio_map = plat_get_mmio_map();
        if (plat_mmio_map != NULL) {
                mmap_add(plat_mmio_map);
        } else {
                WARN("MMIO map not available\n");
        }

        /* set up translation tables */
        init_xlat_tables();

        /* enable the MMU */
        enable_mmu_el3(0);

        /*
         * Add timestamp for arch setup exit.
         */
        boot_profiler_add_record("[TF] arch setup exit");

        INFO("BL3-1: Tegra: MMU enabled\n");
}
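/*
 * page_align() above snaps the timer MMIO base to a page boundary before
 * mapping it (direction 0 rounds down). A minimal sketch of such a helper,
 * assuming 4 KB pages; illustrative only, not the common-code original.
 */
#include <stdint.h>

#define SKETCH_PAGE_SIZE        0x1000U

static uintptr_t sketch_page_align(uintptr_t value, unsigned int dir)
{
        if ((value & (SKETCH_PAGE_SIZE - 1U)) != 0U) {
                value &= ~((uintptr_t)SKETCH_PAGE_SIZE - 1U);
                if (dir != 0U)          /* non-zero direction: round up */
                        value += SKETCH_PAGE_SIZE;
        }

        return value;
}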