/**
 * \brief Switch address spaces: install a new L1 page-table base in
 * TTBR0, and flush the TLB and the I/D caches if the table changed.
 *
 * \param ttbr Physical address of the L1 page table to install.
 */
void paging_context_switch(lpaddr_t ttbr)
{
    lpaddr_t current = cp15_read_ttbr0();
    if (current == ttbr) {
        // Same table already active -- skip the expensive flushes.
        return;
    }
    cp15_write_ttbr0(ttbr);
    cp15_invalidate_tlb();
    cp15_invalidate_i_and_d_caches();
}
/**
 * \brief Perform a context switch. Reload TTBR0 with the new
 * address, and invalidate the TLBs and caches.
 *
 * \param ttbr Physical address of the L1 page table to install in TTBR0.
 */
void paging_context_switch(lpaddr_t ttbr)
{
    // NOTE(review): this variant asserts ttbr > MEMORY_OFFSET while other
    // platform variants in this tree assert ttbr < MEMORY_OFFSET -- confirm
    // which invariant is intended for this platform.
    assert(ttbr > MEMORY_OFFSET);
    lpaddr_t old_ttbr = cp15_read_ttbr0();
    // Only reload and flush when the table actually changes.
    if (ttbr != old_ttbr) {
        cp15_write_ttbr0(ttbr);
        cp15_invalidate_tlb();
    }
}
/**
 * \brief Switch to a new address space by loading its L1 table base
 * into TTBR0, invalidating the TLB when the base actually changes.
 *
 * \param ttbr Physical address of the new L1 table; must lie below
 *             MEMORY_OFFSET.
 */
void paging_context_switch(lpaddr_t ttbr)
{
    assert(ttbr < MEMORY_OFFSET);
    //assert((ttbr & 0x3fff) == 0);
    lpaddr_t prev = cp15_read_ttbr0();
    if (prev == ttbr) {
        return; // already active, nothing to reload
    }
    cp15_write_ttbr0(ttbr);
    cp15_invalidate_tlb();
    // No cache maintenance here: gem5 does not implement the cache
    // maintenance instructions and keeps the caches coherent itself.
    //cp15_invalidate_i_and_d_caches();
}
/**
 * Create initial (temporary) page tables.
 *
 * We use 1MB (ARM_L1_SECTION_BYTES) pages (sections) with a single-level table.
 * This allows 1MB*4k (ARM_L1_MAX_ENTRIES) = 4G per pagetable.
 *
 * Hardware details can be found in:
 * ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition
 * B3: Virtual Memory System Architecture (VMSA)
 */
void paging_init(void)
{
    /**
     * Make sure our page tables are correctly aligned in memory.
     */
    assert(ROUND_UP((lpaddr_t)l1_low, ARM_L1_ALIGN) == (lpaddr_t)l1_low);
    assert(ROUND_UP((lpaddr_t)l1_high, ARM_L1_ALIGN) == (lpaddr_t)l1_high);

    /**
     * On ARMv7-A, physical RAM (PHYS_MEMORY_START) is the same as the
     * offset of mapped physical memory within the virtual address space
     * (MEMORY_OFFSET) -- the layout below relies on this.
     */
    STATIC_ASSERT(MEMORY_OFFSET == PHYS_MEMORY_START, "");

    /**
     * Zero the page tables: this has the effect of marking every PTE
     * as invalid.
     */
    memset(&l1_low, 0, sizeof(l1_low));
    memset(&l1_high, 0, sizeof(l1_high));
    memset(&l2_vec, 0, sizeof(l2_vec));

    /**
     * Now we lay out the kernel's virtual address space.
     *
     * 00000000-7FFFFFFF: 1-1 mappings (hardware we have not mapped
     *                    into high kernel space yet)
     * 80000000-BFFFFFFF: 1-1 mappings (this is 1GB of RAM)
     * C0000000-FEFFFFFF: On-demand mappings of hardware devices,
     *                    allocated descending from DEVICE_OFFSET.
     * FF000000-FFEFFFFF: Unallocated.
     * FFF00000-FFFFFFFF: L2 table, containing:
     *     FFF00000-FFFEFFFF: Unallocated
     *     FFFF0000-FFFFFFFF: Exception vectors
     */
    lvaddr_t base = 0;
    size_t i;
    // Low table (later installed in TTBR0): identity-map the bottom half
    // of the address space (2GB) with device sections.
    for (i=0, base = 0; i < ARM_L1_MAX_ENTRIES/2; i++) {
        map_kernel_section_lo(base, make_dev_section(base));
        base += ARM_L1_SECTION_BYTES;
    }
    // High table (later installed in TTBR1): map 1GB of RAM sections
    // starting at MEMORY_OFFSET.
    for (i=0, base = MEMORY_OFFSET; i < ARM_L1_MAX_ENTRIES/4; i++) {
        map_kernel_section_hi(base, make_ram_section(base));
        base += ARM_L1_SECTION_BYTES;
    }

    /* Map the exception vectors. */
    map_vectors();

    /**
     * TTBCR: Translation Table Base Control register.
     * TTBCR.N is bits[2:0]
     * In a TLB miss TTBCR.N determines whether TTBR0 or TTBR1 is used as the
     * base address for the translation table walk in memory:
     *   N == 0 -> always use TTBR0
     *   N  > 0 -> if VA[31:32-N] > 0 use TTBR1 else use TTBR0
     *
     * TTBR0 is typically used for process-specific addresses.
     * TTBR1 is typically used for OS addresses that do not change on context
     * switch.
     *
     * Set TTBCR.N = 1 to use TTBR1 for VAs >= MEMORY_OFFSET (=2GB).
     */
    assert(mmu_enabled == false);
    // Flush stale cache/TLB state before programming the table bases.
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();
    cp15_write_ttbr1((lpaddr_t)l1_high);
    cp15_write_ttbr0((lpaddr_t)l1_low);
#define TTBCR_N 1
    // Preserve all TTBCR bits except N (bits[2:0]).
    uint32_t ttbcr = cp15_read_ttbcr();
    ttbcr = (ttbcr & ~7) | TTBCR_N;
    cp15_write_ttbcr(ttbcr);
    // N = 1 splits the address space at 2GB; that split must equal
    // MEMORY_OFFSET for the layout above to be correct.
    STATIC_ASSERT(1UL<<(32-TTBCR_N) == MEMORY_OFFSET, "");
#undef TTBCR_N
    cp15_enable_mmu();
    cp15_enable_alignment();
    // Flush again now that translation is live.
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();
    mmu_enabled = true;
}