Example #1
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for kernel and memory-space. It clears out
 * all other mappings. Use this only at system bootup!
 */
void paging_arm_reset(lpaddr_t paddr, size_t bytes)
{
    // make sure kernel pagetable is aligned to 16K after relocation
    aligned_kernel_l1_table = (union arm_l1_entry *)ROUND_UP((uintptr_t)kernel_l1_table, ARM_L1_ALIGN);

    // make sure low l2 pagetable is aligned to 1K after relocation
    aligned_low_l2_table = (union arm_l2_entry *)ROUND_UP((uintptr_t)low_l2_table, ARM_L2_ALIGN);

    // Re-map physical memory
    paging_map_memory((uintptr_t)aligned_kernel_l1_table, paddr, bytes);

    // map first MB at granularity of 4K pages
    uint32_t l2_flags = ARM_L2_SMALL_USR_NONE | ARM_L2_SMALL_CACHEABLE | ARM_L2_SMALL_BUFFERABLE;
    paging_map_user_pages_l1((uintptr_t)aligned_kernel_l1_table, MEMORY_OFFSET,
                             mem_to_local_phys((uintptr_t)aligned_low_l2_table));
    for(lpaddr_t pa=0; pa < ARM_L1_SECTION_BYTES; pa += BYTES_PER_PAGE)
    {
        lvaddr_t va = pa + MEMORY_OFFSET;
        paging_set_l2_entry((uintptr_t *)&aligned_low_l2_table[ARM_L2_OFFSET(va)], pa, l2_flags);
    }

    // map high-mem relocated exception vector to corresponding page in low MB
    // core 0: 0xffff0000 -> 0x80000
    // core 1: 0xffff0000 -> 0x81000
    // ...
    paging_map_user_pages_l1((uintptr_t)aligned_kernel_l1_table, ETABLE_ADDR,
            mem_to_local_phys((uintptr_t)aligned_low_l2_table));
    int core_id = hal_get_cpu_id();
    lpaddr_t addr = ETABLE_PHYS_BASE + core_id * BASE_PAGE_SIZE;
    paging_set_l2_entry((uintptr_t *)&aligned_low_l2_table[ARM_L2_OFFSET(ETABLE_ADDR)], addr, l2_flags);

    cp15_write_ttbr1(mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));

    cp15_invalidate_tlb();
}
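The loop above indexes the low L2 table with ARM_L2_OFFSET(va). A minimal, self-contained sketch of the virtual-address split this relies on (the helper names are hypothetical; the bit positions come from the ARMv7-A short-descriptor format):

/*
 * Sketch only: how a 32-bit VA decomposes under the ARMv7 short-descriptor
 * format used by the examples. Helper names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t l1_index(uint32_t va) { return va >> 20; }           /* VA[31:20], 4096 L1 entries   */
static inline uint32_t l2_index(uint32_t va) { return (va >> 12) & 0xffu; }  /* VA[19:12], 256 L2 entries    */
static inline uint32_t page_off(uint32_t va) { return va & 0xfffu; }         /* VA[11:0], offset in 4KB page */

int main(void)
{
    uint32_t va = 0xffff0000u;  /* the high exception-vector base mapped above */
    printf("L1 index %u, L2 index %u, page offset %u\n",
           l1_index(va), l2_index(va), page_off(va));
    return 0;
}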
Example #2
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for kernel and memory-space. It clears out
 * all other mappings. Use this only at system bootup!
 */
void paging_arm_reset(lpaddr_t paddr, size_t bytes)
{
    // make sure kernel pagetable is aligned to 16K after relocation
    aligned_kernel_l1_table = (union arm_l1_entry *)ROUND_UP(
            (uintptr_t)kernel_l1_table, ARM_L1_ALIGN);

    // Re-map physical memory
    paging_map_memory((uintptr_t)aligned_kernel_l1_table, paddr, bytes);

    // map high-mem relocated exception vector to kernel section
    paging_map_kernel_section((uintptr_t)aligned_kernel_l1_table, ETABLE_ADDR,
                              PHYS_MEMORY_START);

    cp15_write_ttbr1(mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));
    cp15_invalidate_tlb();
}
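Both variants assume kernel_l1_table was over-allocated so that ROUND_UP can yield a 16KB-aligned pointer. A self-contained sketch of that idiom (the ROUND_UP definition and storage size here are assumptions, not taken from the examples):

/* Sketch only: obtaining a 16KB-aligned L1 table from over-allocated storage. */
#include <stdint.h>
#include <assert.h>

#define ROUND_UP(x, a)  (((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))
#define ARM_L1_ALIGN    16384u
#define L1_TABLE_BYTES  (4096u * 4u)   /* 4096 word-sized section entries */

static char l1_storage[L1_TABLE_BYTES + ARM_L1_ALIGN];  /* slack for alignment */

int main(void)
{
    uintptr_t aligned = ROUND_UP((uintptr_t)l1_storage, ARM_L1_ALIGN);
    assert((aligned & (ARM_L1_ALIGN - 1)) == 0);   /* 16KB aligned */
    assert(aligned + L1_TABLE_BYTES <= (uintptr_t)l1_storage + sizeof(l1_storage));
    return 0;
}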
Example #3
lvaddr_t paging_map_device(lpaddr_t device_base, size_t device_bytes)
{
    // HACK to put device in high memory.
    // Should likely track these allocations.
    static lvaddr_t dev_alloc = DEVICE_OFFSET;
    assert(device_bytes <= BYTES_PER_SECTION);
    dev_alloc -= BYTES_PER_SECTION;

    printf("paging_map_device_section: 0x%"PRIxLVADDR", 0x%"PRIxLVADDR", "
            "0x%"PRIxLPADDR".\n",
            (uintptr_t)aligned_kernel_l1_table, dev_alloc, device_base);

    paging_map_device_section((uintptr_t)aligned_kernel_l1_table, dev_alloc,
            device_base);

    cp15_write_ttbr1(mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();

    return dev_alloc;
}
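A hypothetical caller of this helper might look as follows (DEVICE_PHYS_BASE and device_attach are placeholders; the kernel's own types and constants from the examples are assumed to be in scope, and the physical base is assumed 1MB-aligned since it is mapped 1:1 into the allocated section):

/* Sketch only: mapping one 1MB device section and using the returned VA. */
#define DEVICE_PHYS_BASE  ((lpaddr_t)0x48000000)  /* placeholder, 1MB-aligned */

static volatile uint32_t *device_regs;

void device_attach(void)
{
    lvaddr_t va = paging_map_device(DEVICE_PHYS_BASE, BYTES_PER_SECTION);
    device_regs = (volatile uint32_t *)va;
    /* device_regs[0] now accesses the first 32-bit register of the device */
}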
Example #4
/**
 * Create initial (temporary) page tables.
 *
 * We use 1MB (ARM_L1_SECTION_BYTES) sections in a single-level table.
 * This covers 1MB * 4096 (ARM_L1_MAX_ENTRIES) = 4GB per page table.
 *
 * Hardware details can be found in:
 * ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition
 *   B3: Virtual Memory System Architecture (VMSA)
 */
void paging_init(void)
{
    /**
     * Make sure our page tables are correctly aligned in memory
     */
    assert(ROUND_UP((lpaddr_t)l1_low, ARM_L1_ALIGN) == (lpaddr_t)l1_low);
    assert(ROUND_UP((lpaddr_t)l1_high, ARM_L1_ALIGN) == (lpaddr_t)l1_high);

    /**
     * On ARMv7-A, the start of physical RAM (PHYS_MEMORY_START) is the same
     * as the offset of mapped physical memory within the virtual address
     * space (MEMORY_OFFSET).
     */
    STATIC_ASSERT(MEMORY_OFFSET == PHYS_MEMORY_START, "");

    /**
     * Zero the page tables: this has the effect of marking every PTE
     * as invalid.
     */
    memset(&l1_low,  0, sizeof(l1_low));
    memset(&l1_high, 0, sizeof(l1_high));
    memset(&l2_vec,  0, sizeof(l2_vec));

    /**
     * Now we lay out the kernel's virtual address space.
     *
     * 00000000-7FFFFFFF: 1-1 mappings (hardware we have not mapped
     *                    into high kernel space yet)
     * 80000000-BFFFFFFF: 1-1 mappings (this is 1GB of RAM)
     * C0000000-FEFFFFFF: On-demand mappings of hardware devices,
     *                    allocated descending from DEVICE_OFFSET.
     * FF000000-FFEFFFFF: Unallocated.
     * FFF00000-FFFFFFFF: L2 table, containing:
     *      FFF00000-FFFEFFFF: Unallocated
     *      FFFF0000-FFFFFFFF: Exception vectors
     */
    lvaddr_t base = 0;
    size_t i;
    for (i=0, base = 0; i < ARM_L1_MAX_ENTRIES/2; i++) {
        map_kernel_section_lo(base, make_dev_section(base));
        base += ARM_L1_SECTION_BYTES;
    }
    for (i=0, base = MEMORY_OFFSET; i < ARM_L1_MAX_ENTRIES/4; i++) {
        map_kernel_section_hi(base, make_ram_section(base));
        base += ARM_L1_SECTION_BYTES;
    }

    /* Map the exception vectors. */
    map_vectors();

    /**
     * TTBCR: Translation Table Base Control register.
     *  TTBCR.N is bits[2:0]
     * In a TLB miss TTBCR.N determines whether TTBR0 or TTBR1 is used as the
     * base address for the translation table walk in memory:
     *  N == 0 -> always use TTBR0
     *  N >  0 -> if VA[31:32-N] > 0 use TTBR1 else use TTBR0
     *
     * TTBR0 is typically used for process-specific addresses
     * TTBR1 is typically used for OS addresses that do not change on context
     *       switch
     *
     * set TTBCR.N = 1 to use TTBR1 for VAs >= MEMORY_OFFSET (=2GB)
     */
    assert(mmu_enabled == false);
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();
    cp15_write_ttbr1((lpaddr_t)l1_high);
    cp15_write_ttbr0((lpaddr_t)l1_low);
    #define TTBCR_N 1
    uint32_t ttbcr = cp15_read_ttbcr();
    ttbcr =  (ttbcr & ~7) | TTBCR_N;
    cp15_write_ttbcr(ttbcr);
    STATIC_ASSERT(1UL<<(32-TTBCR_N) == MEMORY_OFFSET, "");
    #undef TTBCR_N
    cp15_enable_mmu();
    cp15_enable_alignment();
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();
    mmu_enabled = true;
}
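The TTBCR rule described in the comment can be checked with a small standalone sketch (ttbr_for is a hypothetical helper; MEMORY_OFFSET is repeated here as a plain constant consistent with the STATIC_ASSERT for N = 1):

/* Sketch only: which translation table base register a VA selects for a given TTBCR.N. */
#include <stdint.h>
#include <stdio.h>

#define MEMORY_OFFSET 0x80000000u   /* 2GB split, consistent with TTBCR.N = 1 above */

static const char *ttbr_for(uint32_t va, unsigned n)
{
    /* TTBR0 when the top N bits of the VA are all zero, TTBR1 otherwise */
    if (n == 0 || (va >> (32 - n)) == 0) {
        return "TTBR0";
    }
    return "TTBR1";
}

int main(void)
{
    printf("0x%08x -> %s\n", 0x00010000u, ttbr_for(0x00010000u, 1));      /* user VA:   TTBR0 */
    printf("0x%08x -> %s\n", MEMORY_OFFSET, ttbr_for(MEMORY_OFFSET, 1));  /* kernel VA: TTBR1 */
    return 0;
}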