void paging_context_switch(lpaddr_t ttbr)
{
    // printf("paging context switch to %"PRIxLPADDR"\n", ttbr);
    lpaddr_t old_ttbr = cp15_read_ttbr0();
    if (ttbr != old_ttbr) {
        cp15_write_ttbr0(ttbr);
        cp15_invalidate_tlb();
        cp15_invalidate_i_and_d_caches();
    }
}
/**
 * \brief Perform a context switch.  Reload TTBR0 with the new
 * address and invalidate the TLB.
 */
void paging_context_switch(lpaddr_t ttbr)
{
    assert(ttbr > MEMORY_OFFSET);
    lpaddr_t old_ttbr = cp15_read_ttbr0();
    if (ttbr != old_ttbr) {
        cp15_write_ttbr0(ttbr);
        cp15_invalidate_tlb();
    }
}
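
/*
 * For reference, a minimal sketch of what the cp15 accessors used above
 * plausibly look like on ARMv7 with the short-descriptor translation scheme.
 * The real implementations live elsewhere in the kernel; the MRC/MCR
 * encodings below (TTBR0 and TLBIALL) follow the ARM architecture manual,
 * and the *_sketch names simply mirror the calls above.
 */
static inline lpaddr_t cp15_read_ttbr0_sketch(void)
{
    lpaddr_t ttbr;
    __asm__ volatile("mrc p15, 0, %[ttbr], c2, c0, 0" : [ttbr] "=r" (ttbr));
    return ttbr;
}

static inline void cp15_write_ttbr0_sketch(lpaddr_t ttbr)
{
    __asm__ volatile("mcr p15, 0, %[ttbr], c2, c0, 0" :: [ttbr] "r" (ttbr));
}

static inline void cp15_invalidate_tlb_sketch(void)
{
    // TLBIALL: invalidate the entire unified TLB (the register value is ignored).
    __asm__ volatile("mcr p15, 0, %[z], c8, c7, 0" :: [z] "r" (0));
}
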
/**
 * \brief Write an L1 page-table entry for the given virtual address.
 * If no table base is supplied, the entry is written into the currently
 * active translation table (TTBR0 for addresses below MEMORY_OFFSET,
 * TTBR1 otherwise), accessed through the kernel's virtual window.
 */
static void paging_write_l1_entry(uintptr_t ttbase, lvaddr_t va, union arm_l1_entry l1)
{
    union arm_l1_entry *l1_table;
    if (ttbase == 0) {
        if (va < MEMORY_OFFSET) {
            ttbase = cp15_read_ttbr0() + MEMORY_OFFSET;
        } else {
            ttbase = cp15_read_ttbr1() + MEMORY_OFFSET;
        }
    }
    l1_table = (union arm_l1_entry *) ttbase;
    l1_table[ARM_L1_OFFSET(va)] = l1;
}
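
/*
 * A minimal usage sketch for paging_write_l1_entry(), assuming ARM_L1_OFFSET
 * selects one of the 4096 1 MB section slots (i.e. va >> 20) and that
 * union arm_l1_entry exposes a raw 32-bit view.  It installs an
 * identity-mapped section descriptor (type bits[1:0] = 0b10, section base in
 * bits[31:20]); the helper name map_section_identity and the chosen access
 * permissions are hypothetical, not part of the kernel's API.
 */
static void map_section_identity(lvaddr_t va)
{
    union arm_l1_entry e;
    e.raw  = 0;
    e.raw |= 2u;                      // bits[1:0] = 0b10: section descriptor
    e.raw |= (3u << 10);              // AP[1:0] = 0b11: full access (assumed policy)
    e.raw |= (va & 0xfff00000u);      // section base address = bits[31:20] of va
    paging_write_l1_entry(0, va, e);  // 0: write into the currently active table
}
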
/*
 * Seed a new L1 translation table with the kernel's mappings: copy every
 * entry covering addresses from MEMORY_OFFSET upwards out of the currently
 * active L1 table, so the kernel remains mapped after a later context
 * switch to the new table.
 */
void paging_make_good(lvaddr_t new_table_base, size_t new_table_bytes)
{
    assert(new_table_base >= MEMORY_OFFSET);
    assert(new_table_bytes == ARM_L1_ALIGN);
    assert(aligned(new_table_base, ARM_L1_ALIGN));

    lvaddr_t ttbr = local_phys_to_mem(cp15_read_ttbr0());
    size_t st = (MEMORY_OFFSET / ARM_L1_SECTION_BYTES) * ARM_L1_BYTES_PER_ENTRY;

    // Copy kernel pages (everything from MEMORY_OFFSET upwards)
    memcpy((void*)new_table_base + st,
           (void*)ttbr + st,
           ARM_L1_MAX_ENTRIES * ARM_L1_BYTES_PER_ENTRY - st);
}
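
/*
 * A usage sketch tying the routines above together: seed a freshly allocated
 * L1 table with the kernel mappings and switch to it.  The allocator
 * alloc_aligned_l1_table() is hypothetical, and mem_to_local_phys() is
 * assumed to be the inverse of the local_phys_to_mem() call used above.
 */
extern lvaddr_t alloc_aligned_l1_table(size_t align);   // hypothetical allocator

static void switch_to_new_address_space(void)
{
    // Hypothetical: returns an ARM_L1_ALIGN-aligned, kernel-virtual buffer.
    lvaddr_t new_l1 = alloc_aligned_l1_table(ARM_L1_ALIGN);

    // Clear the user part; paging_make_good() then copies in the kernel part.
    memset((void *)new_l1, 0, ARM_L1_ALIGN);
    paging_make_good(new_l1, ARM_L1_ALIGN);

    // The context switch takes the table's physical address.
    paging_context_switch(mem_to_local_phys(new_l1));
}
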
void paging_context_switch(lpaddr_t ttbr)
{
    assert(ttbr < MEMORY_OFFSET);
    //assert((ttbr & 0x3fff) == 0);
    lpaddr_t old_ttbr = cp15_read_ttbr0();
    if (ttbr != old_ttbr) {
        cp15_write_ttbr0(ttbr);
        cp15_invalidate_tlb();
        // This isn't necessary on gem5, since gem5 doesn't implement the cache
        // maintenance instructions, but ensures coherency by itself.
        //cp15_invalidate_i_and_d_caches();
    }
}