/* * Func: post_sysinfo_scan_mmu_setup * Desc: Once we have scanned coreboot tables, we have complete information * about different memory ranges. Thus, we can perform a complete mmu * initialization. Also, this takes care of DMA area setup */ static void post_sysinfo_scan_mmu_setup(void) { struct memrange *ranges; uint64_t nranges; struct mmu_ranges mmu_ranges; struct mmu_memrange *dma_range; /* Get memrange info from lib_sysinfo */ lib_sysinfo_get_memranges(&ranges, &nranges); /* Get memory ranges for mmu init from lib_sysinfo memrange */ dma_range = mmu_init_ranges_from_sysinfo(ranges, nranges, &mmu_ranges); /* Disable mmu */ mmu_disable(); /* Init mmu */ mmu_init(&mmu_ranges); /* Enable mmu */ mmu_enable(); /* Init dma memory */ init_dma_memory((void *)dma_range->base, dma_range->size); }
/*
 * Map the low 1 GiB of the address space (peripheral and flash window)
 * as device memory and switch the MMU on. Always returns 0.
 */
static int virt_mmu_enable(void)
{
	/* One flat device mapping covering 0x00000000-0x3fffffff. */
	arch_remap_range((void *)0x0, 0x40000000, DEV_MEM);

	mmu_enable();

	return 0;
}
/*
 * MMU setup for the Eukrea CPUIMX25 board: 128 MiB of SDRAM mapped
 * cached at its physical address plus an uncached alias, and a
 * coherent-DMA mapping offset.
 */
static void eukrea_cpuimx25_mmu_init(void)
{
	mmu_init();

	/* 128 MiB SDRAM, identity-mapped cached at 0x80000000. */
	arm_create_section(0x80000000, 0x80000000, 128, PMD_SECT_DEF_CACHED);
	/* Uncached alias of the same SDRAM at 0x90000000. */
	arm_create_section(0x90000000, 0x80000000, 128, PMD_SECT_DEF_UNCACHED);

	/* 0x10000000 is presumably the cached->uncached alias offset used
	 * for coherent DMA — TODO confirm against setup_dma_coherent(). */
	setup_dma_coherent(0x10000000);

	mmu_enable();
}
void sdm845_mmu_init(void) { mmu_init(); mmu_config_range((void *)(4 * KiB), ((4UL * GiB) - (4 * KiB)), DEV_MEM); mmu_config_range((void *)_ssram, REGION_SIZE(ssram), CACHED_RAM); mmu_config_range((void *)_bsram, REGION_SIZE(bsram), CACHED_RAM); mmu_config_range((void *)_dma_coherent, REGION_SIZE(dma_coherent), UNCACHED_RAM); mmu_enable(); }
void init_memory_secondary(){ // Disable MMU mmu_disable(); // Get the L1 page table base address. uint32_t * table = (uint32_t *)&__l1_page_table_start; // write entry table address to TTBR0 _ARM_MCR(15, 0, table, 2, 0, 0); // set Client mode for all Domains uint32_t dacr = 0x55555555; _ARM_MCR(15, 0, dacr, 3, 0, 0); // MCR p15, 0, <Rd>, c3, c0, 0 ; Write DACR // Enable MMU mmu_enable(); /**************************************** * Branch prediction */ // Disable branch prediction arm_branch_prediction_disable(); // Invalidate branch prediction array arm_branch_target_cache_invalidate(); // Branch Prediction Enable arm_branch_prediction_enable(); /**************************************** * Instruction Cache */ // Disable L1 Instruction cache arm_icache_disable(); // Invalidate Instruction cache arm_icache_invalidate(); // Enable Instruction cache arm_icache_enable(); /**************************************** * Data Cache */ // Disable L1 Data Caches arm_dcache_disable(); // Invalidate Data cache arm_dcache_invalidate(); // Enable Data cache arm_dcache_enable(); }
/*
 * Initialize the Tegra132 MMU. The memory map is assembled from the
 * SoC defaults plus mainboard additions, and the translation tables
 * are placed at the bottom of the TrustZone carveout.
 */
void tegra132_mmu_init(void)
{
	uintptr_t tz_base;	/* TrustZone carveout base, in MiB then bytes */
	size_t tz_size;		/* carveout size in MiB (not otherwise used) */
	size_t ttb_bytes;
	struct memranges *mem_map = &t132_mmap_ranges;

	tegra132_memrange_init(mem_map);
	mainboard_add_memory_ranges(mem_map);

	/* Page tables live at the base of the trust zone region. */
	carveout_range(CARVEOUT_TZ, &tz_base, &tz_size);
	tz_base *= MiB;
	ttb_bytes = TTB_SIZE * MiB;

	mmu_init(mem_map, (void *)tz_base, ttb_bytes);
	mmu_enable();
}
/*
 * MMU setup for the Eukrea CPUIMX27 board: 128 MiB of SDRAM mapped
 * cached plus an uncached alias, a coherent-DMA offset, and — when
 * TEXT_BASE is 1 MiB aligned — an uncached vector section at 0x0.
 */
static void eukrea_cpuimx27_mmu_init(void)
{
	mmu_init();

	/* 128 MiB SDRAM, identity-mapped cached at 0xa0000000. */
	arm_create_section(0xa0000000, 0xa0000000, 128, PMD_SECT_DEF_CACHED);
	/* Uncached alias of the same SDRAM at 0xb0000000. */
	arm_create_section(0xb0000000, 0xa0000000, 128, PMD_SECT_DEF_UNCACHED);

	/* 0x10000000 is presumably the cached->uncached alias offset used
	 * for coherent DMA — TODO confirm against setup_dma_coherent(). */
	setup_dma_coherent(0x10000000);

#if TEXT_BASE & (0x100000 - 1)
#warning cannot create vector section. Adjust TEXT_BASE to a 1M boundary
#else
	/* Map a 1 MiB section at 0x0 onto TEXT_BASE for the vectors. */
	arm_create_section(0x0, TEXT_BASE, 1, PMD_SECT_DEF_UNCACHED);
#endif

	mmu_enable();
}
/*
 * Build a 1:1 page table covering all physical memory and enable DAT.
 *
 * The last 128 MiB of the address space is set aside to generate
 * invalid addresses (for testing PGM). The new region-1 table is
 * stored in table_root and also returned to the caller.
 */
void *setup_mmu(phys_addr_t phys_end)
{
	/* Allocate a fresh region-1 table. */
	pgd_t *root = pgd_alloc_one();

	/* Identity-map [0, phys_end). */
	setup_identity(root, 0, phys_end);

	/* Generate 128 MiB of invalid addresses at the end (for testing
	 * PGM); start the vpage allocator just below them. */
	init_alloc_vpage((void *)-(1UL << 27));
	setup_identity(root, -(1UL << 27), 0);

	/* Finally switch DAT on with the freshly built table. */
	mmu_enable(root);
	table_root = root;
	return root;
}
void mt8173_mmu_init(void) { mmu_init(); /* Set 0x0 to the end of 2GB dram address as device memory */ mmu_config_range((void *)0, (uintptr_t)_dram + 2U * GiB, DEV_MEM); /* SRAM is cached */ mmu_config_range(_sram_l2c, _sram_l2c_size + _sram_size, CACHED_MEM); /* DMA is non-cached and is reserved for TPM & da9212 I2C DMA */ mmu_config_range(_dma_coherent, _dma_coherent_size, UNCACHED_MEM); /* set ttb as secure */ mmu_config_range(_ttb, _ttb_size, SECURE_MEM); mmu_enable(); }
int mmu_setup() { CurrentPageTable = (uint32_t*) PageTable; // Initialize the page table initialize_pagetable(); // Implement the page table // Disable checking TLB permissions for domain 0, which is the only domain we're using so anyone can access // anywhere in memory, since we trust ourselves. WriteDomainAccessControlRegister(ARM11_DomainAccessControl_D0_ALL); WriteTranslationTableBaseRegister0(CurrentPageTable); InvalidateUnifiedTLBUnlockedEntries(); mmu_enable(); InvalidateUnifiedTLBUnlockedEntries(); return 0; }
/*
 * MMU setup for the Eukrea CPUIMX35 board: 128 MiB of SDRAM mapped
 * cached plus an uncached alias, a coherent-DMA offset, an optional
 * vector section at 0x0, and (if configured) the L2X0 outer cache.
 * Always returns 0.
 */
static int eukrea_cpuimx35_mmu_init(void)
{
	mmu_init();

	/* 128 MiB SDRAM, identity-mapped cached at 0x80000000. */
	arm_create_section(0x80000000, 0x80000000, 128, PMD_SECT_DEF_CACHED);
	/* Uncached alias of the same SDRAM at 0x90000000. */
	arm_create_section(0x90000000, 0x80000000, 128, PMD_SECT_DEF_UNCACHED);

	/* 0x10000000 is presumably the cached->uncached alias offset used
	 * for coherent DMA — TODO confirm against setup_dma_coherent(). */
	setup_dma_coherent(0x10000000);

#if TEXT_BASE & (0x100000 - 1)
#warning cannot create vector section. Adjust TEXT_BASE to a 1M boundary
#else
	/* Map a 1 MiB section at 0x0 onto TEXT_BASE for the vectors. */
	arm_create_section(0x0, TEXT_BASE, 1, PMD_SECT_DEF_UNCACHED);
#endif

	mmu_enable();

#ifdef CONFIG_CACHE_L2X0
	/* Bring up the L2X0 outer cache controller. */
	l2x0_init((void __iomem *)0x30000000, 0x00030024, 0x00000000);
#endif

	return 0;
}
/*
 * One-time architecture bring-up for the boot CPU: descriptor tables,
 * kernel identity mapping, paging, kernel/service initialization, and
 * finally the switch out of the setup context via arch_leave().
 */
void arch_setup(struct service_backend *backend)
{
	/* Register the boot core (id 0) with its kernel stack top. */
	core_init(&core0, 0, ARCH_KERNELSTACKADDRESS + ARCH_KERNELSTACKSIZE, 0);

	/* CPU descriptor tables: GDT, IDT, and the TSS for core 0. */
	arch_configuregdt();
	arch_configureidt();
	arch_configuretss(&tss0, core0.id, core0.sp);

	/* Identity-map the first 16 MiB of physical memory in 4 MiB slots. */
	mapkernel(0, 0x00000000, 0x00000000, 0x00400000);
	mapkernel(1, 0x00400000, 0x00400000, 0x00400000);
	mapkernel(2, 0x00800000, 0x00800000, 0x00400000);
	mapkernel(3, 0x00C00000, 0x00C00000, 0x00400000);

	/* Activate paging with the kernel page directory. */
	mmu_setdirectory(getkerneldirectory());
	mmu_enable();

	/* Kernel services: mailbox, core callbacks, ABI, binary formats,
	 * initrd contents, and the backend's resource. */
	kernel_setup((char *)ARCH_MAILBOXADDRESS);
	kernel_setcallback(coreget, coreassign);
	abi_setup(spawn, despawn);
	binary_setupelf();
	service_setupcpio();
	resource_register(&backend->resource);
	setuptask();

	/* Leave setup and start scheduling on core 0. */
	arch_leave(&core0);
}
/*
 * Post-DRAM MMU adjustments: remap DRAM as cached now that it works,
 * then return the L2C SRAM to the L2 cache. The MMU is briefly
 * disabled around the cache-geometry change.
 */
void mt8173_mmu_after_dram(void)
{
	/* Map DRAM as cached now that it's up and running */
	mmu_config_range(_dram, (uintptr_t)sdram_size(), CACHED_MEM);

	/* Unmap L2C SRAM so it can be reclaimed by L2 cache */
	/* TODO: Implement true unmapping, and also use it for the zero-page! */
	mmu_config_range(_sram_l2c, _sram_l2c_size, DEV_MEM);

	/* The DRAM DMA window stays uncached. */
	mmu_config_range(_dram_dma, _dram_dma_size, UNCACHED_MEM);

	/* Careful: changing cache geometry while it's active is a bad idea! */
	mmu_disable();

	/* Return L2C SRAM back to L2 cache. Set it to 512KiB which is the max
	 * available L2 cache for A53 in MT8173. */
	write32(&mt8173_mcucfg->mp0_ca7l_cache_config, 3 << 8);

	/* turn off the l2c sram clock */
	write32(&mt8173_infracfg->infra_pdn0, L2C_SRAM_PDN);

	/* Reenable MMU with now enlarged L2 cache. Page tables still valid. */
	mmu_enable();
}
/*******************************************************************************
 * Name:      Boot0_C_part
 * Prototype: void Boot0_C_part( void )
 * Purpose:   Main flow of the C-language part of Boot0.
 * Params:    void
 * Returns:   void
 * Notes:     Brings up clocks/UART, optionally enables JTAG, checks for a
 *            FEL (USB recovery) request, enables the MMU, initializes DRAM,
 *            loads Boot1 from the configured storage medium and jumps to it,
 *            falling back to FEL on any failure.
 *******************************************************************************/
void Boot0_C_part( void )
{
	__u32 status;
	__s32 dram_size;
	int index = 0;
	int ddr_aotu_scan = 0;
	volatile unsigned int *reg_addr = 0;

//	move_RW( );
	clear_ZI( );
	bias_calibration();
	timer_init();
	UART_open( BT0_head.prvt_head.uart_port, (void *)BT0_head.prvt_head.uart_ctrl, 24*1000*1000 );
	//odt_status = check_odt(5);
	if( BT0_head.prvt_head.enable_jtag )
	{
		jtag_init( (normal_gpio_cfg *)BT0_head.prvt_head.jtag_gpio );
	}
	msg("HELLO! BOOT0 is starting!\n");
	print_version();

	{
		__u32 reg_val;
		__u32 fel_flag;

		/* Read the FEL request flag left at 0x01f00108. */
		fel_flag = *(volatile unsigned int *)(0x01f00000 + 0x108);
		/* print smp status: dump and clear the register window
		 * 0x01f00100..0x01f00117. */
		index = 0;
		while(index < 0x18)
		{
			reg_addr = (volatile unsigned int *)(0x01f00000 + 0x100 + index);
			reg_val = *reg_addr;
			*reg_addr = 0;
			msg("reg_addr %x =%x\n", reg_addr, reg_val);
			index+=0x4;
		}
//		reg_val = *(volatile unsigned int *)(0x01f00000 + 0x108);
//		*(volatile unsigned int *)(0x01f00000 + 0x108) = 0;
//		msg("fel_flag=%x\n", fel_flag);
		if(fel_flag == 0x5AA5A55A)
		{
			/* Magic value set: jump straight to FEL recovery. */
			msg("eraly jump fel\n");
			pll_reset();
			__msdelay(10);
			jump_to( FEL_BASE );
		}
	}

	mmu_system_init(EGON2_DRAM_BASE, 1 * 1024, EGON2_MMU_BASE);
	mmu_enable();

//	dram_size = init_DRAM(BT0_head.boot_head.platform[7]);	// initialize DRAM
//#ifdef CONFIG_SUN6I_FPGA
//	ddr_aotu_scan = 1;
//	msg("config fpga\n");
//#else
//	ddr_aotu_scan = BT0_head.boot_head.platform[7];
//	msg("not config fpga\n");
//#endif
	ddr_aotu_scan = 0;
#ifdef DEBUG
	{
		int k;
		/* Dump the 16 DRAM parameter words for debugging. */
		for(k=0;k<16;k++)
		{
			msg("%x\n", BT0_head.prvt_head.dram_para[k]);
		}
	}
#endif
//	msg("------------before------------\n");
//	dram_para_display();
	dram_size = init_DRAM(ddr_aotu_scan, (void *)BT0_head.prvt_head.dram_para);
	if(dram_size)
	{
		/* DRAM is up: persist the (possibly tuned) parameters. */
		mdfs_save_value((void *)BT0_head.prvt_head.dram_para);
		msg("dram size =%d\n", dram_size);
	}
	else
	{
		/* DRAM failed to initialize: fall back to FEL recovery. */
		msg("initializing SDRAM Fail.\n");
		mmu_disable( );
		pll_reset();
		__msdelay(10);
		jump_to( FEL_BASE );
	}
//	{
//		__u32 reg_val;
//
//		reg_val = *(volatile __u32 *)(0x1c20d20);
//		*(volatile __u32 *)(0x1c20d20) = 0;
//		msg("reg_val=%x, %x\n", reg_val, *(volatile __u32 *)(0x1c20d24));
//		if(reg_val & 0x01)
//		{
//			mmu_disable( );
//			jump_to( 0x40100000 );
//		}
//	}
//	msg("------------end------------\n");
//	dram_para_display();

	/* Load Boot1 from whichever storage medium this build targets. */
#if SYS_STORAGE_MEDIA_TYPE == SYS_STORAGE_MEDIA_NAND_FLASH
	status = load_Boot1_from_nand( );				// load Boot1
#elif SYS_STORAGE_MEDIA_TYPE == SYS_STORAGE_MEDIA_SPI_NOR_FLASH
	status = load_boot1_from_spinor( );				// load Boot1
#elif SYS_STORAGE_MEDIA_TYPE == SYS_STORAGE_MEDIA_SD_CARD
	status = load_boot1_from_sdmmc( (char *)BT0_head.prvt_head.storage_data );	// load boot1
#else
#error The storage media of Boot1 has not been defined.
#endif

	msg("Ready to disable icache.\n");
	mmu_disable( );		// disable instruction cache
	if( status == OK )
	{
//		restart_watch_dog( );					// restart watch dog
		/* Before jumping to Boot1, write the DRAM size in.
		 * Before jumping, pass all DRAM parameters on to Boot1. */
		//set_dram_size(dram_size );
		set_dram_para((void *)&BT0_head.prvt_head.dram_para, dram_size);
		msg("Succeed in loading Boot1.\n" "Jump to Boot1.\n");
		jump_to( BOOT1_BASE );		// on success, transfer control to Boot1
	}
	else
	{
//		disable_watch_dog( );					// disable watch dog
		msg("Fail in loading Boot1.\n" "Jump to Fel.\n");
		pll_reset();
		__msdelay(10);
		jump_to( FEL_BASE );	// on failure, hand control back to FEL
	}
}
/*
 * Full memory-system bring-up for the current core: MMU, branch
 * prediction, and L1 caches; on core 0 additionally the shared L2
 * (L2C-310) cache and the SCU.
 *
 * Fix: the parameter list was the unprototyped K&R form `()`; it is
 * now the proper C prototype `(void)`.
 */
void init_memory_system(void)
{
	/* Secondary cores join SMP coherency before touching caches. */
	if (cpu_get_current() != 0) {
		scu_join_smp();
		scu_enable_maintenance_broadcast();
	}

	/****************************************
	 * MMU: disable, rebuild the mappings, re-enable.
	 */
	mmu_disable();
	mmu_init();
	mmu_enable();

	/****************************************
	 * Branch prediction: disable, invalidate, re-enable.
	 */
	arm_branch_prediction_disable();
	arm_branch_target_cache_invalidate();
	arm_branch_prediction_enable();

	/****************************************
	 * L1 data cache: disable, invalidate, re-enable.
	 */
	arm_dcache_disable();
	arm_dcache_invalidate();
	arm_dcache_enable();

	/****************************************
	 * L1 instruction cache: disable, invalidate, re-enable.
	 */
	arm_icache_disable();
	arm_icache_invalidate();
	arm_icache_enable();

	/****************************************
	 * L2 cache and SCU: core 0 owns the shared hardware.
	 */
	if (cpu_get_current() == 0) {
		_l2c310_cache_disable();
		_l2c310_cache_setup();
		_l2c310_cache_invalidate();
		_l2c310_cache_enable();

		scu_enable();
		scu_join_smp();
	}
}