void MMU_CreateTranslationTable(void)
{
  mmu_region_attributes_Type region;

  //Create 4GB of faulting entries
  MMU_TTSection (&Image$$TTB$$ZI$$Base, 0, 4096, DESCRIPTOR_FAULT);

  /*
   * Generate descriptors. Refer to core_ca.h to get information about attributes
   */
  //Create descriptors for Vectors, RO, RW, ZI sections
  section_normal(Sect_Normal, region);
  section_normal_cod(Sect_Normal_Cod, region);
  section_normal_ro(Sect_Normal_RO, region);
  section_normal_rw(Sect_Normal_RW, region);
  //Create descriptors for peripherals
  section_device_ro(Sect_Device_RO, region);
  section_device_rw(Sect_Device_RW, region);
  //Create descriptors for 64k pages
  page64k_device_rw(Page_L1_64k, Page_64k_Device_RW, region);
  //Create descriptors for 4k pages
  page4k_device_rw(Page_L1_4k, Page_4k_Device_RW, region);

  /*
   * Define MMU flat-map regions and attributes
   */

  //Define Image
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$VECTORS$$Base, 1, Sect_Normal_Cod);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_DATA$$Base, 1, Sect_Normal_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$ZI_DATA$$Base, 1, Sect_Normal_RW);

  //All DRAM executable, rw, cacheable - applications may choose to divide memory into ro executable
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$TTB$$ZI$$Base, 2043, Sect_Normal);

  //--------------------- PERIPHERALS -------------------
  MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_FLASH_BASE0   , 64, Sect_Device_RO);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_FLASH_BASE1   , 64, Sect_Device_RO);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_SRAM_BASE     , 64, Sect_Device_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_VRAM_BASE     , 32, Sect_Device_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_ETHERNET_BASE , 16, Sect_Device_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, VE_A9_MP_USB_BASE      , 16, Sect_Device_RW);

  // Create (16 * 64k) = 1MB of faulting entries to cover peripheral range 0x1C000000-0x1C0FFFFF
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, PERIPHERAL_A_FAULT      , 16, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, DESCRIPTOR_FAULT);
  // Define peripheral range 0x1C000000-0x1C0FFFFF
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_DAP_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_SYSTEM_REG_BASE,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_SERIAL_BASE    ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_AACI_BASE      ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_MMCI_BASE      ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_KMI0_BASE      ,  2, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_UART_BASE      ,  4, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_WDT_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_A_TABLE_L2_BASE_64k, Page_64k_Device_RW);

  // Create (16 * 64k) = 1MB of faulting entries to cover peripheral range 0x1C100000-0x1C1FFFFF
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, PERIPHERAL_B_FAULT      , 16, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, DESCRIPTOR_FAULT);
  // Define peripheral range 0x1C100000-0x1C1FFFFF
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_TIMER_BASE     ,  2, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_DVI_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_RTC_BASE       ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_UART4_BASE     ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);
  MMU_TTPage64k(&Image$$TTB$$ZI$$Base, VE_A9_MP_CLCD_BASE      ,  1, Page_L1_64k, (uint32_t *)PERIPHERAL_B_TABLE_L2_BASE_64k, Page_64k_Device_RW);

  // Create (256 * 4k) = 1MB of faulting entries to cover the private address space; must be marked as Device memory
  MMU_TTPage4k (&Image$$TTB$$ZI$$Base, __get_CBAR()            , 256, Page_L1_4k, (uint32_t *)PRIVATE_TABLE_L2_BASE_4k, DESCRIPTOR_FAULT);
  // Define private address space entry
  MMU_TTPage4k (&Image$$TTB$$ZI$$Base, __get_CBAR()            ,   2, Page_L1_4k, (uint32_t *)PRIVATE_TABLE_L2_BASE_4k, Page_4k_Device_RW);
  // Define L2CC entry. Uncomment if a PL310 is present
  // MMU_TTPage4k (&Image$$TTB$$ZI$$Base, VE_A9_MP_PL310_BASE  ,   1, Page_L1_4k, (uint32_t *)PRIVATE_TABLE_L2_BASE_4k, Page_4k_Device_RW);

  // Create (256 * 4k) = 1MB of faulting entries to cover the synchronization space (useful if a non-cacheable DMA agent is present in the SoC)
  MMU_TTPage4k (&Image$$TTB$$ZI$$Base, F_SYNC_BASE             , 256, Page_L1_4k, (uint32_t *)SYNC_FLAGS_TABLE_L2_BASE_4k, DESCRIPTOR_FAULT);
  // Define synchronization space entry
  MMU_TTPage4k (&Image$$TTB$$ZI$$Base, FLAG_SYNC               ,   1, Page_L1_4k, (uint32_t *)SYNC_FLAGS_TABLE_L2_BASE_4k, Page_4k_Device_RW);

  /* Set location of level 1 page table
  ; 31:14 - Translation table base addr (31:14-TTBCR.N, TTBCR.N is 0 out of reset)
  ; 13:7  - 0x0
  ; 6     - IRGN[0] 0x0 (Inner WB WA)
  ; 5     - NOS     0x0 (Non-shared)
  ; 4:3   - RGN     0x1 (Outer WB WA)
  ; 2     - IMP     0x0 (Implementation Defined)
  ; 1     - S       0x0 (Non-shared)
  ; 0     - IRGN[1] 0x1 (Inner WB WA) */
  __set_TTBR0(((uint32_t)&Image$$TTB$$ZI$$Base) | 9);
  __ISB();

  /* Set up domain access control register
  ; We set domain 0 to Client and all other domains to No Access.
  ; All translation table entries specify domain 0 */
  __set_DACR(1);
  __ISB();
}
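/*
 * Note: the function above only builds the translation table and programs
 * TTBR0/DACR; it does not turn the MMU or caches on. A plausible follow-up
 * sequence, sketched from the CMSIS-Core(A) helpers declared in core_ca.h
 * (verify the exact names and order against the header revision in use;
 * the wrapper name below is hypothetical, for illustration only).
 */
static void EnableMmuAndCaches_Sketch(void)
{
  MMU_InvalidateTLB();   // discard stale translations before enabling
  MMU_Enable();          // set SCTLR.M: turn on address translation
  L1C_EnableCaches();    // set SCTLR.I and SCTLR.C: L1 I- and D-caches
  L1C_EnableBTAC();      // set SCTLR.Z: branch prediction
}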
void MMU_CreateTranslationTable(void)
{
  mmu_region_attributes_Type region;

#if defined ( __ICCARM__ )
#pragma section=".intvec"
#pragma section=".rodata"
#pragma section=".rwdata"
#pragma section=".bss"
  Image$$VECTORS$$Base  = (uint32_t) __section_begin(".intvec");
  Image$$VECTORS$$Limit = ((uint32_t)__section_begin(".intvec") + (uint32_t)__section_size(".intvec"));
  Image$$RO_DATA$$Base  = (uint32_t) __section_begin(".rodata");
  Image$$RO_DATA$$Limit = ((uint32_t)__section_begin(".rodata") + (uint32_t)__section_size(".rodata"));
  Image$$RW_DATA$$Base  = (uint32_t) __section_begin(".rwdata");
  Image$$RW_DATA$$Limit = ((uint32_t)__section_begin(".rwdata") + (uint32_t)__section_size(".rwdata"));
  Image$$RW_IRAM1$$Base = (uint32_t) __section_begin(".bss");
  Image$$RW_IRAM1$$Limit= ((uint32_t)__section_begin(".bss")    + (uint32_t)__section_size(".bss"));
#endif

  /*
   * Generate descriptors. Refer to core_ca.h to get information about attributes
   */
  //Create descriptors for Vectors, RO, RW, ZI sections
  section_normal(Sect_Normal, region);
  section_normal_cod(Sect_Normal_Cod, region);
  section_normal_ro(Sect_Normal_RO, region);
  section_normal_rw(Sect_Normal_RW, region);
  //Create descriptors for peripherals
  section_device_ro(Sect_Device_RO, region);
  section_device_rw(Sect_Device_RW, region);
  section_normal_nc(Sect_Normal_NC, region);
  //Create descriptors for 64k pages
  page64k_device_rw(Page_L1_64k, Page_64k_Device_RW, region);
  //Create descriptors for 4k pages
  page4k_device_rw(Page_L1_4k, Page_4k_Device_RW, region);

  /*
   * Define MMU flat-map regions and attributes
   */

  //Create 4GB of faulting entries
  MMU_TTSection (&Image$$TTB$$ZI$$Base, 0, 4096, DESCRIPTOR_FAULT);

  // R7S72100 memory map
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_NORFLASH_BASE0   , 64, Sect_Normal_RO);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_NORFLASH_BASE1   , 64, Sect_Normal_RO);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_SDRAM_BASE0      , 64, Sect_Normal_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_SDRAM_BASE1      , 64, Sect_Normal_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_USER_AREA0       , 64, Sect_Normal_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_USER_AREA1       , 64, Sect_Normal_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_SPI_IO0          , 64, Sect_Normal_RO);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_SPI_IO1          , 64, Sect_Normal_RO);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_ONCHIP_SRAM_BASE , 10, Sect_Normal_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_SPI_MIO_BASE     ,  1, Sect_Device_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_BSC_BASE         ,  1, Sect_Device_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_PERIPH_BASE0     ,  3, Sect_Device_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_PERIPH_BASE1     , 49, Sect_Device_RW);

#if defined( __ICCARM__ )
  //Define Image
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$RO_DATA$$Base , RO_DATA_SIZE , Sect_Normal_Cod);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$VECTORS$$Base , VECTORS_SIZE , Sect_Normal_Cod);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$RW_DATA$$Base , RW_DATA_SIZE , Sect_Normal_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$RW_IRAM1$$Base, RW_IRAM1_SIZE, Sect_Normal_RW);
#else
  //Define Image
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RO_DATA$$Base , RO_DATA_SIZE , Sect_Normal_Cod);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$VECTORS$$Base , VECTORS_SIZE , Sect_Normal_Cod);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_DATA$$Base , RW_DATA_SIZE , Sect_Normal_RW);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_IRAM1$$Base, RW_IRAM1_SIZE, Sect_Normal_RW);
#endif

#if defined( __CC_ARM ) || defined ( __ICCARM__ )
  MMU_TTSection (&Image$$TTB$$ZI$$Base, RZ_A1_ONCHIP_SRAM_NC_BASE, 10, Sect_Normal_NC);
#else
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_DATA_NC$$Base, RW_DATA_NC_SIZE, Sect_Normal_NC);
  MMU_TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$ZI_DATA_NC$$Base, ZI_DATA_NC_SIZE, Sect_Normal_NC);
#endif

  /* Set location of level 1 page table
  ; 31:14 - Translation table base addr (31:14-TTBCR.N, TTBCR.N is 0 out of reset)
  ; 13:7  - 0x0
  ; 6     - IRGN[0] 0x0 (Inner WB WA)
  ; 5     - NOS     0x0 (Non-shared)
  ; 4:3   - RGN     0x1 (Outer WB WA)
  ; 2     - IMP     0x0 (Implementation Defined)
  ; 1     - S       0x0 (Non-shared)
  ; 0     - IRGN[1] 0x1 (Inner WB WA) */
  __set_TTBR0(((uint32_t)&Image$$TTB$$ZI$$Base) | 9);
  __ISB();

  /* Set up domain access control register
  ; We set domain 0 to Client and all other domains to No Access.
  ; All translation table entries specify domain 0 */
  __set_DACR(1);
  __ISB();
}
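/*
 * For reference, MMU_TTSection() writes one level-1 entry per 1MB section,
 * starting at the section that contains base_address. The sketch below is
 * modeled on the CMSIS-Core(A) implementation in core_ca.h and is
 * illustrative, not the authoritative source (the _Sketch name is
 * hypothetical).
 */
static void MMU_TTSection_Sketch(uint32_t *ttb, uint32_t base_address,
                                 uint32_t count, uint32_t descriptor_l1)
{
  // Section base address goes in bits [31:20]; attributes in the low bits.
  uint32_t entry = (base_address & 0xFFF00000u) | descriptor_l1;
  uint32_t i;

  ttb += base_address >> 20;   // index of the first 1MB section

  for (i = 0; i < count; i++) {
    *ttb++ = entry;            // one 4-byte L1 entry per section
    entry += 0x00100000u;      // advance the mapped base address by 1MB
  }
}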
/**
 * @brief   Core/MMU Module initialization.
 * @note    This function is implicitly invoked on system initialization,
 *          there is no need to explicitly initialize the module.
 *
 * @notapi
 */
void __core_init(void) {
  uint32_t pm;

  /*
   * Invalidate L1 D Cache if it was disabled
   */
  pm = __get_SCTLR();
  if ((pm & SCTLR_C_Msk) == 0) {
    __L1C_CleanInvalidateCache(DCISW_INVALIDATE);
  }

  /*
   * Default, undefined regions
   */
  for (pm = 0; pm < 4096; ++pm)
    mmuTable[pm] = TTE_SECT_UNDEF;

  /*
   * ROM region
   *
   * 0x00000000
   */
  mmuTable[0] = TTE_SECT_SECTION(0x00000000) |
                TTE_SECT_MEM_NO_CACHEABLE |
                TTE_SECT_RO_ACCESS |
                TTE_SECT_DOM(0x0F) |
                TTE_SECT_S |
                TTE_TYPE_SECT;

  /*
   * NFC SRAM region
   *
   * 0x00100000
   */
  mmuTable[1] = TTE_SECT_SECTION(0x00100000) |
                TTE_SECT_DEVICE |
                TTE_SECT_RW_ACCESS |
                TTE_SECT_DOM(0x0F) |
                TTE_SECT_S |
                TTE_TYPE_SECT;

  /*
   * SRAM region
   *
   * 0x00200000
   */
  mmuTable[2] = TTE_SECT_SECTION(0x00200000) |
                TTE_SECT_MEM_CACHEABLE |
                TTE_SECT_RW_ACCESS |
                TTE_SECT_DOM(0x0F) |
                TTE_SECT_S |
                TTE_TYPE_SECT;

  /*
   * UDPHS RAM region
   *
   * 0x00300000
   */
  mmuTable[3] = TTE_SECT_SECTION(0x00300000) |
                TTE_SECT_DEVICE |
                TTE_SECT_RW_ACCESS |
                TTE_SECT_DOM(0x0F) |
                TTE_SECT_EXE_NEVER |
                TTE_SECT_S |
                TTE_TYPE_SECT;

  /*
   * UHPHS region
   *
   * 0x00400000
   */
  mmuTable[4] = TTE_SECT_SECTION(0x00400000) |
                TTE_SECT_DEVICE |
                TTE_SECT_RW_ACCESS |
                TTE_SECT_DOM(0x0F) |
                TTE_SECT_EXE_NEVER |
                TTE_SECT_S |
                TTE_TYPE_SECT;

  /*
   * UDPHS region
   *
   * 0x00500000
   */
  mmuTable[5] = TTE_SECT_SECTION(0x00500000) |
                TTE_SECT_DEVICE |
                TTE_SECT_RW_ACCESS |
                TTE_SECT_DOM(0x0F) |
                TTE_SECT_EXE_NEVER |
                TTE_SECT_S |
                TTE_TYPE_SECT;

  /*
   * AXIMX region
   *
   * 0x00600000
   */
  mmuTable[6] = TTE_SECT_SECTION(0x00600000) |
                TTE_SECT_DEVICE |
                TTE_SECT_RW_ACCESS |
                TTE_SECT_DOM(0x0F) |
                TTE_SECT_EXE_NEVER |
                TTE_SECT_S |
                TTE_TYPE_SECT;

  /*
   * DAP region
   *
   * 0x00700000
   */
  mmuTable[7] = TTE_SECT_SECTION(0x00700000) |
                TTE_SECT_DEVICE |
                TTE_SECT_RW_ACCESS |
                TTE_SECT_DOM(0x0F) |
                TTE_SECT_EXE_NEVER |
                TTE_SECT_S |
                TTE_TYPE_SECT;

  /*
   * L2CC region, low
   *
   * 0x00a00000
   */
  mmuTable[0xa] = TTE_SECT_SECTION(0x00a00000) |
                  TTE_SECT_DEVICE |
                  TTE_SECT_RW_ACCESS |
                  TTE_SECT_DOM(0x0F) |
                  TTE_SECT_EXE_NEVER |
                  TTE_SECT_S |
                  TTE_TYPE_SECT;

  /*
   * L2CC region, hi
   *
   * 0x00b00000
   */
  mmuTable[0xb] = TTE_SECT_SECTION(0x00b00000) |
                  TTE_SECT_DEVICE |
                  TTE_SECT_RW_ACCESS |
                  TTE_SECT_DOM(0x0F) |
                  TTE_SECT_EXE_NEVER |
                  TTE_SECT_S |
                  TTE_TYPE_SECT;

  /*
   * EBI regions
   *
   * 0x10000000 - 0x1fffffff
   */
  for (pm = 0x100; pm < 0x200; pm++)
    mmuTable[pm] = TTE_SECT_SECTION(pm << 20) |
                   TTE_SECT_MEM_STRONGLY_ORD |
                   TTE_SECT_RW_ACCESS |
                   TTE_SECT_DOM(0x0F) |
                   TTE_SECT_EXE_NEVER |
                   TTE_SECT_S |
                   TTE_TYPE_SECT;

  /*
   * DDR regions
   *
   * 0x20000000 - 0x3fffffff
   */
  for (pm = 0x200; pm < 0x400; pm++)
    mmuTable[pm] = TTE_SECT_SECTION(pm << 20) |
                   TTE_SECT_MEM_CACHEABLE |
                   TTE_SECT_RW_ACCESS |
                   TTE_SECT_DOM(0x0F) |
                   TTE_SECT_S |
                   TTE_TYPE_SECT;

  /*
   * DDR AESB regions
   *
   * 0x40000000 - 0x5fffffff
   */
  for (pm = 0x400; pm < 0x600; pm++)
    mmuTable[pm] = TTE_SECT_SECTION(pm << 20) |
                   TTE_SECT_MEM_CACHEABLE |
                   TTE_SECT_RW_ACCESS |
                   TTE_SECT_DOM(0x0F) |
                   TTE_SECT_S |
                   TTE_TYPE_SECT;

  /*
   * EBI 1, 2 and 3 regions
   *
   * 0x60000000 - 0x8fffffff
   */
  for (pm = 0x600; pm < 0x900; pm++)
    mmuTable[pm] = TTE_SECT_SECTION(pm << 20) |
                   TTE_SECT_MEM_STRONGLY_ORD |
                   TTE_SECT_RW_ACCESS |
                   TTE_SECT_DOM(0x0F) |
                   TTE_SECT_S |
                   TTE_TYPE_SECT;

  /*
   * QSPI0/1 AESB MEM regions
   *
   * 0x90000000 - 0x9fffffff
   */
  for (pm = 0x900; pm < 0xa00; pm++)
    mmuTable[pm] = TTE_SECT_SECTION(pm << 20) |
                   TTE_SECT_MEM_STRONGLY_ORD |
                   TTE_SECT_RW_ACCESS |
                   TTE_SECT_DOM(0x0F) |
                   TTE_SECT_S |
                   TTE_TYPE_SECT;

  /*
   * SDMMC0/1 regions
   *
   * 0xa0000000 - 0xbfffffff
   */
  for (pm = 0xa00; pm < 0xc00; pm++)
    mmuTable[pm] = TTE_SECT_SECTION(pm << 20) |
                   TTE_SECT_MEM_STRONGLY_ORD |
                   TTE_SECT_RW_ACCESS |
                   TTE_SECT_DOM(0x0F) |
                   TTE_SECT_EXE_NEVER |
                   TTE_SECT_S |
                   TTE_TYPE_SECT;

  /*
   * NFC regions
   *
   * 0xc0000000 - 0xcfffffff
   */
  for (pm = 0xc00; pm < 0xd00; pm++)
    mmuTable[pm] = TTE_SECT_SECTION(pm << 20) |
                   TTE_SECT_MEM_STRONGLY_ORD |
                   TTE_SECT_RW_ACCESS |
                   TTE_SECT_DOM(0x0F) |
                   TTE_SECT_EXE_NEVER |
                   TTE_SECT_S |
                   TTE_TYPE_SECT;

  /*
   * QSPI0/1 MEM regions
   *
   * 0xd0000000 - 0xdfffffff
   */
  for (pm = 0xd00; pm < 0xe00; pm++)
    mmuTable[pm] = TTE_SECT_SECTION(pm << 20) |
                   TTE_SECT_MEM_STRONGLY_ORD |
                   TTE_SECT_RW_ACCESS |
                   TTE_SECT_DOM(0x0F) |
                   TTE_SECT_S |
                   TTE_TYPE_SECT;

  /*
   * Internal peripherals regions
   *
   * 0xf0000000
   * 0xf8000000
   * 0xfc000000
   */
  mmuTable[0xf00] = TTE_SECT_SECTION(0xf0000000) |
                    TTE_SECT_DEVICE |
                    TTE_SECT_RW_ACCESS |
                    TTE_SECT_DOM(0x0F) |
                    TTE_SECT_EXE_NEVER |
                    TTE_SECT_S |
                    TTE_TYPE_SECT;
  mmuTable[0xf80] = TTE_SECT_SECTION(0xf8000000) |
                    TTE_SECT_DEVICE |
                    TTE_SECT_RW_ACCESS |
                    TTE_SECT_DOM(0x0F) |
                    TTE_SECT_EXE_NEVER |
                    TTE_SECT_S |
                    TTE_TYPE_SECT;
  mmuTable[0xfc0] = TTE_SECT_SECTION(0xfc000000) |
                    TTE_SECT_DEVICE |
                    TTE_SECT_RW_ACCESS |
                    TTE_SECT_DOM(0x0F) |
                    TTE_SECT_EXE_NEVER |
                    TTE_SECT_S |
                    TTE_TYPE_SECT;

  /*
   * Invalidate TLB and L1 I cache
   * Enable caches and MMU
   */
  MMU_InvalidateTLB();
  __set_TTBR0((uint32_t)mmuTable | 0x5B);
  __set_DACR(0xC0000000);
  __DSB();
  __ISB();

  /*
   * L1 I cache invalidate and enable
   */
  pm = __get_SCTLR();
  if ((pm & SCTLR_I_Msk) == 0) {
    __set_ICIALLU(0);
    __set_SCTLR(pm | SCTLR_I_Msk);
  }

  /*
   * MMU enable
   */
  pm = __get_SCTLR();
  if ((pm & SCTLR_M_Msk) == 0)
    __set_SCTLR(pm | SCTLR_M_Msk);

  /*
   * L1 D cache enable
   */
  pm = __get_SCTLR();
  if ((pm & SCTLR_C_Msk) == 0) {
    __set_SCTLR(pm | SCTLR_C_Msk);
  }
}
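/*
 * A decode of the magic constants programmed above, assuming the ARMv7-A
 * short-descriptor TTBR0 and DACR layouts; the enumerator names are
 * illustrative, not taken from the original source.
 */
enum {
  TTBR0_IRGN0 = 1u << 6,  /* IRGN = 0b11: inner write-back cacheable walks */
  TTBR0_RGN   = 3u << 3,  /* RGN  = 0b11: outer write-back cacheable walks */
  TTBR0_S     = 1u << 1,  /* shareable table walks                         */
  TTBR0_IRGN1 = 1u << 0
};

/* 0x5B is exactly these attribute bits ORed into the table base address. */
_Static_assert((TTBR0_IRGN0 | TTBR0_RGN | TTBR0_S | TTBR0_IRGN1) == 0x5B,
               "TTBR0 attribute bits match the literal used in __core_init()");

/* DACR holds two bits per domain. Every entry above selects domain 15 via
 * TTE_SECT_DOM(0x0F), and 0xC0000000 programs that domain as Manager (0b11),
 * so the AP bits in the individual entries are not permission-checked. */
_Static_assert((3u << (15 * 2)) == 0xC0000000u,
               "DACR value grants Manager access to domain 15 only");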