void __init setup_page_tables(const mmap_region_t *bl_regions,
			      const mmap_region_t *plat_regions)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	const mmap_region_t *regions = bl_regions;

	while (regions->size != 0U) {
		VERBOSE("Region: 0x%lx - 0x%lx has attributes 0x%x\n",
			regions->base_va,
			regions->base_va + regions->size,
			regions->attr);
		regions++;
	}
#endif
	/*
	 * Map the Trusted SRAM with appropriate memory attributes.
	 * Subsequent mappings will adjust the attributes for specific regions.
	 */
	mmap_add(bl_regions);

	/* Now (re-)map the platform-specific memory regions */
	mmap_add(plat_regions);

	/* Create the page tables to reflect the above mappings */
	init_xlat_tables();
}
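Callers are expected to hand setup_page_tables() zero-terminated mmap_region_t arrays; both the verbose loop above and mmap_add() stop at the first entry whose size is 0. Below is a minimal sketch of such a caller using the MAP_REGION_FLAT helper from the TF-A xlat tables library; the PLAT_DEV_* names, the chosen attributes, and the wrapper function name are placeholders for illustration, not part of the code above.

/*
 * Hypothetical caller sketch: both arrays end with a zero-size entry so the
 * library knows where the table stops. PLAT_DEV_BASE/PLAT_DEV_SIZE are
 * made-up names, not real TF-A macros.
 */
static const mmap_region_t plat_regions[] = {
	MAP_REGION_FLAT(PLAT_DEV_BASE, PLAT_DEV_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	{0}
};

static const mmap_region_t bl_regions[] = {
	MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
			MT_CODE | MT_SECURE),
	MAP_REGION_FLAT(BL_RO_DATA_BASE, BL_RO_DATA_END - BL_RO_DATA_BASE,
			MT_RO_DATA | MT_SECURE),
	{0}
};

void plat_arch_setup_example(void)
{
	/* Build the tables from both region lists, then turn the MMU on. */
	setup_page_tables(bl_regions, plat_regions);
	enable_mmu_el3(0);
}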
/*******************************************************************************
 * Function that sets up the translation tables.
 ******************************************************************************/
void gxbb_setup_page_tables(void)
{
#if IMAGE_BL31
	const mmap_region_t gxbb_bl_mmap[] = {
		MAP_BL31,
		MAP_BL_CODE,
		MAP_BL_RO_DATA,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT,
#endif
		{0}
	};
#endif

	mmap_add(gxbb_bl_mmap);
	mmap_add(gxbb_mmap);

	init_xlat_tables();
}
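MAP_BL31, MAP_BL_CODE, MAP_BL_RO_DATA and MAP_BL_COHERENT are platform-local macros defined alongside this function; each one expands to a single mmap_region_t entry. A hedged sketch of what two such definitions might look like, using MAP_REGION_FLAT (the exact attributes in the real port may differ):

/* Illustrative only; the actual macros live next to gxbb_setup_page_tables(). */
#define MAP_BL31	MAP_REGION_FLAT(BL31_BASE,			\
					BL31_END - BL31_BASE,		\
					MT_MEMORY | MT_RW | MT_SECURE)

#define MAP_BL_CODE	MAP_REGION_FLAT(BL_CODE_BASE,			\
					BL_CODE_END - BL_CODE_BASE,	\
					MT_CODE | MT_SECURE)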
void bl31_plat_arch_setup(void)
{
	mmap_add_region(BL31_BASE, BL31_BASE,
			(BL31_LIMIT - BL31_BASE),
			MT_MEMORY | MT_RW | MT_SECURE);
	mmap_add_region(BL_CODE_BASE, BL_CODE_BASE,
			(BL_CODE_END - BL_CODE_BASE),
			MT_MEMORY | MT_RO | MT_SECURE);
#if USE_COHERENT_MEM
	mmap_add_region(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_BASE,
			(BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE),
			MT_DEVICE | MT_RW | MT_SECURE);
#endif
	mmap_add(imx_mmap);

	init_xlat_tables();

	enable_mmu_el3(0);
}
void plat_configure_mmu_svc_mon(unsigned long total_base,
				unsigned long total_size,
				unsigned long ro_start,
				unsigned long ro_limit,
				unsigned long coh_start,
				unsigned long coh_limit)
{
	mmap_add_region(total_base, total_base, total_size,
			MT_MEMORY | MT_RW | MT_SECURE);
	mmap_add_region(ro_start, ro_start, ro_limit - ro_start,
			MT_MEMORY | MT_RO | MT_SECURE);
	mmap_add_region(coh_start, coh_start, coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
	mmap_add(plat_rk_mmap);
	rockchip_plat_mmu_svc_mon();
	init_xlat_tables();
	enable_mmu_svc_mon(0);
}
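The AArch32 SVC monitor entry code is expected to call this with the image's linker-defined boundaries. A hypothetical call site follows; the choice of arguments (spanning from BL_CODE_BASE up to the end of coherent RAM) is an assumption for illustration, not the exact Rockchip port.

/* Hypothetical call site; argument selection is an assumption. */
void sp_min_plat_arch_setup(void)
{
	plat_configure_mmu_svc_mon(BL_CODE_BASE,
				   BL_COHERENT_RAM_END - BL_CODE_BASE,
				   BL_CODE_BASE,
				   BL_CODE_END,
				   BL_COHERENT_RAM_BASE,
				   BL_COHERENT_RAM_END);
}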
MAPID valueMap::add(JS::HandleValue val, int mark)
{
	int ret = containsValue(val);
	if (ret != 0) {
		return ret;
	}

	//stHeapValue* p = new stHeapValue(val);
	/*stHeapValue p(val);
	p.mark = (char)mark;*/

	int J = 0;
	/*list<int>::iterator itBegin = lstFree.begin();
	if (itBegin != lstFree.end()) {
		J = *itBegin;
		lstFree.erase(itBegin);
	} else*/
	// 2015.Nov.3rd
	// IDs are no longer recycled for now: if a script member function such as
	// Update is handed a recycled ID, that ID may still be present in idFunRet,
	// so the next callFunctionValue would remove it again. This likely caused
	// calls to land in another script's Update and corrupted the mapping.
	{
		J = valueMap::index++;
	}

	mmap_newelement(p, J, val, mark);

	Assert(!valueMap::tracing);
	VALUEMAPIT itJ = mmap_find(J);
	Assert(mit_invalid(itJ));

	// 1)
	mmap_add(J, p);

	// 2)
	Assert(!GCing);
	vmap_add(p->heapValue.get().asRawBits(), J);

	return J;
}
void bl31_plat_arch_setup(void)
{
	mmap_add_region(BL31_RO_START, BL31_RO_START,
			(BL31_RO_END - BL31_RO_START),
			MT_MEMORY | MT_RO | MT_SECURE);
	mmap_add_region(BL31_RW_START, BL31_RW_START,
			(BL31_RW_END - BL31_RW_START),
			MT_MEMORY | MT_RW | MT_SECURE);

	mmap_add(imx_mmap);

#if USE_COHERENT_MEM
	mmap_add_region(BL31_COHERENT_RAM_START, BL31_COHERENT_RAM_START,
			BL31_COHERENT_RAM_END - BL31_COHERENT_RAM_START,
			MT_DEVICE | MT_RW | MT_SECURE);
#endif
	/* setup xlat table */
	init_xlat_tables();

	/* enable the MMU */
	enable_mmu_el3(0);
}
/*******************************************************************************
 * Perform the very early platform-specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
	unsigned long bl31_base_pa = tegra_bl31_phys_base;
	unsigned long total_base = bl31_base_pa;
	unsigned long total_size = BL32_BASE - BL31_RO_BASE;
	unsigned long ro_start = bl31_base_pa;
	unsigned long ro_size = BL31_RO_LIMIT - BL31_RO_BASE;
	const mmap_region_t *plat_mmio_map = NULL;
#if USE_COHERENT_MEM
	unsigned long coh_start, coh_size;
#endif

	/* add memory regions */
	mmap_add_region(total_base, total_base,
			total_size,
			MT_MEMORY | MT_RW | MT_SECURE);
	mmap_add_region(ro_start, ro_start,
			ro_size,
			MT_MEMORY | MT_RO | MT_SECURE);

#if USE_COHERENT_MEM
	coh_start = total_base + (BL31_COHERENT_RAM_BASE - BL31_RO_BASE);
	coh_size = BL31_COHERENT_RAM_LIMIT - BL31_COHERENT_RAM_BASE;

	mmap_add_region(coh_start, coh_start,
			coh_size,
			MT_DEVICE | MT_RW | MT_SECURE);
#endif

	/* add MMIO space */
	plat_mmio_map = plat_get_mmio_map();
	if (plat_mmio_map)
		mmap_add(plat_mmio_map);
	else
		WARN("MMIO map not available\n");

	/* set up translation tables */
	init_xlat_tables();

	/* enable the MMU */
	enable_mmu_el3(0);
}
/*******************************************************************************
 * Perform the very early platform-specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
	uint64_t rw_start = BL31_RW_START;
	uint64_t rw_size = BL31_RW_END - BL31_RW_START;
	uint64_t rodata_start = BL31_RODATA_BASE;
	uint64_t rodata_size = BL31_RODATA_END - BL31_RODATA_BASE;
	uint64_t code_base = TEXT_START;
	uint64_t code_size = TEXT_END - TEXT_START;
	const mmap_region_t *plat_mmio_map = NULL;
#if USE_COHERENT_MEM
	uint32_t coh_start, coh_size;
#endif
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();

	/*
	 * Add timestamp for arch setup entry.
	 */
	boot_profiler_add_record("[TF] arch setup entry");

	/* add memory regions */
	mmap_add_region(rw_start, rw_start,
			rw_size,
			MT_MEMORY | MT_RW | MT_SECURE);
	mmap_add_region(rodata_start, rodata_start,
			rodata_size,
			MT_RO_DATA | MT_SECURE);
	mmap_add_region(code_base, code_base,
			code_size,
			MT_CODE | MT_SECURE);

	/* map TZDRAM used by BL31 as coherent memory */
	if (TEGRA_TZRAM_BASE == tegra_bl31_phys_base) {
		mmap_add_region(params_from_bl2->tzdram_base,
				params_from_bl2->tzdram_base,
				BL31_SIZE,
				MT_DEVICE | MT_RW | MT_SECURE);
	}

#if USE_COHERENT_MEM
	coh_start = total_base + (BL_COHERENT_RAM_BASE - BL31_RO_BASE);
	coh_size = BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE;

	mmap_add_region(coh_start, coh_start,
			coh_size,
			(uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE);
#endif

	/* map on-chip free running uS timer */
	mmap_add_region(page_align(TEGRA_TMRUS_BASE, 0),
			page_align(TEGRA_TMRUS_BASE, 0),
			TEGRA_TMRUS_SIZE,
			(uint8_t)MT_DEVICE | (uint8_t)MT_RO | (uint8_t)MT_SECURE);

	/* add MMIO space */
	plat_mmio_map = plat_get_mmio_map();
	if (plat_mmio_map != NULL) {
		mmap_add(plat_mmio_map);
	} else {
		WARN("MMIO map not available\n");
	}

	/* set up translation tables */
	init_xlat_tables();

	/* enable the MMU */
	enable_mmu_el3(0);

	/*
	 * Add timestamp for arch setup exit.
	 */
	boot_profiler_add_record("[TF] arch setup exit");

	INFO("BL3-1: Tegra: MMU enabled\n");
}
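plat_get_mmio_map() is expected to return another zero-terminated mmap_region_t table describing the SoC's device windows, which mmap_add() then folds into the same translation tables. A minimal sketch of such a table is shown below; the TEGRA_EXAMPLE_* base and size names are placeholders, not real Tegra macros.

/* Placeholder region names; a real port lists its actual MMIO apertures. */
static const mmap_region_t tegra_mmio_map[] = {
	MAP_REGION_FLAT(TEGRA_EXAMPLE_UART_BASE, TEGRA_EXAMPLE_UART_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	MAP_REGION_FLAT(TEGRA_EXAMPLE_GIC_BASE, TEGRA_EXAMPLE_GIC_SIZE,
			MT_DEVICE | MT_RW | MT_SECURE),
	{0}
};

const mmap_region_t *plat_get_mmio_map(void)
{
	return tegra_mmio_map;
}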