Example #1
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for the kernel and memory space and
 * clears out all other mappings. Use this only at system bootup!
 */
void paging_arm_reset(lpaddr_t paddr, size_t bytes)
{
    // make sure kernel pagetable is aligned to 16K after relocation
    aligned_kernel_l1_table = (union arm_l1_entry *)ROUND_UP((uintptr_t)kernel_l1_table, ARM_L1_ALIGN);

    // make sure low l2 pagetable is aligned to 1K after relocation
    aligned_low_l2_table = (union arm_l2_entry *)ROUND_UP((uintptr_t)low_l2_table, ARM_L2_ALIGN);

    // Re-map physical memory
    paging_map_memory((uintptr_t)aligned_kernel_l1_table, paddr, bytes);

    // map first MB at granularity of 4K pages
    uint32_t l2_flags = ARM_L2_SMALL_USR_NONE | ARM_L2_SMALL_CACHEABLE | ARM_L2_SMALL_BUFFERABLE;
    paging_map_user_pages_l1((uintptr_t)aligned_kernel_l1_table, MEMORY_OFFSET,
                             mem_to_local_phys((uintptr_t)aligned_low_l2_table));
    for(lpaddr_t pa=0; pa < ARM_L1_SECTION_BYTES; pa += BYTES_PER_PAGE)
    {
        lvaddr_t va = pa + MEMORY_OFFSET;
        paging_set_l2_entry((uintptr_t *)&aligned_low_l2_table[ARM_L2_OFFSET(va)], pa, l2_flags);
    }

    // map high-mem relocated exception vector to corresponding page in low MB
    // core 0: 0xffff0000 -> 0x80000
    // core 1: 0xffff0000 -> 0x81000
    // ...
    paging_map_user_pages_l1((uintptr_t)aligned_kernel_l1_table, ETABLE_ADDR,
            mem_to_local_phys((uintptr_t)aligned_low_l2_table));
    int core_id = hal_get_cpu_id();
    lpaddr_t addr = ETABLE_PHYS_BASE + core_id * BASE_PAGE_SIZE;
    paging_set_l2_entry((uintptr_t *)&aligned_low_l2_table[ARM_L2_OFFSET(ETABLE_ADDR)], addr, l2_flags);

    cp15_write_ttbr1(mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));

    cp15_invalidate_tlb();
}
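The alignment fix-ups above depend on ROUND_UP and the ARM_L1_ALIGN/ARM_L2_ALIGN constants. A minimal sketch of plausible definitions, assuming power-of-two alignments (the actual Barrelfish definitions live in its headers and may differ):

/* Sketch only: assumed helper definitions, not Barrelfish's own.
 * An ARMv7 L1 table has 4096 4-byte entries and must be 16KB-aligned
 * (the TTBR ignores its low bits); an L2 table has 256 4-byte entries,
 * hence 1KB alignment. */
#define ROUND_UP(x, align)  (((x) + (align) - 1) & ~((uintptr_t)(align) - 1))
#define ARM_L1_ALIGN        16384u   /* 16KB */
#define ARM_L2_ALIGN        1024u    /* 1KB  */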
Example #2
void paging_context_switch(lpaddr_t ttbr)
{
//    printf("paging context switch to %"PRIxLPADDR"\n", ttbr);
    lpaddr_t old_ttbr = cp15_read_ttbr0();
    if (ttbr != old_ttbr) {
        cp15_write_ttbr0(ttbr);
        cp15_invalidate_tlb();
        cp15_invalidate_i_and_d_caches();
    }
}
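The cp15_* calls wrap ARMv7 coprocessor-15 accesses. As a hedged sketch of what the TTBR0 accessors look like (TTBR0 is CP15 c2/c0/0; the real Barrelfish versions live in its architecture support code and may differ):

/* Sketch, not the actual Barrelfish implementation. */
static inline lpaddr_t cp15_read_ttbr0(void)
{
    uint32_t ttbr;
    __asm volatile("mrc p15, 0, %[t], c2, c0, 0" : [t] "=r" (ttbr));
    return (lpaddr_t)ttbr;
}

static inline void cp15_write_ttbr0(lpaddr_t ttbr)
{
    __asm volatile("mcr p15, 0, %[t], c2, c0, 0" :: [t] "r" ((uint32_t)ttbr));
}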
Example #3
/**
 * \brief Perform a context switch.  Reload TTBR0 with the new
 * address, and invalidate the TLBs and caches.
 */
void paging_context_switch(lpaddr_t ttbr)
{
    assert(ttbr > MEMORY_OFFSET);
    lpaddr_t old_ttbr = cp15_read_ttbr0();
    if (ttbr != old_ttbr)
    {
        cp15_write_ttbr0(ttbr);
        cp15_invalidate_tlb();
    }
}
Example #4
/**
 * \brief Map a device into the kernel's address space.  
 * 
 * \param device_base is the physical address of the device
 * \param device_size is the number of bytes of physical address space
 * the device occupies. 
 *
 * \return the kernel virtual address of the mapped device, or panic. 
 */
lvaddr_t paging_map_device(lpaddr_t dev_base, size_t dev_size)
{
    // We map all hardware devices in the kernel using sections in the
    // top quarter (0xC0000000-0xFE000000) of the address space, just
    // below the exception vectors.  
    // 
    // It makes sense to use sections since (1) we don't map many
    // devices in the CPU driver anyway, and (2) if we did, it might
    // save a wee bit of TLB space. 
    //

    // First, we make sure that the device fits into a single
    // section. 
    if (ARM_L1_SECTION_NUMBER(dev_base) != ARM_L1_SECTION_NUMBER(dev_base+dev_size-1)) {
        panic("Attempt to map device spanning >1 section 0x%"PRIxLPADDR"+0x%x\n",
              dev_base, dev_size );
    }
    
    // Now, walk down the page table looking for either (a) an existing
    // mapping, in which case return the address the device is already
    // mapped to, or (b) an invalid mapping, in which case map it.
    uint32_t dev_section = ARM_L1_SECTION_NUMBER(dev_base);
    uint32_t dev_offset  = ARM_L1_SECTION_OFFSET(dev_base);
    lvaddr_t dev_virt    = 0;
    
    for( size_t i = ARM_L1_OFFSET( DEVICE_OFFSET - 1); i > ARM_L1_MAX_ENTRIES / 4 * 3; i-- ) {

        // Work out the virtual address we're looking at
        dev_virt = (lvaddr_t)(i << ARM_L1_SECTION_BITS);

        // If we already have a mapping for that address, return it. 
        if ( L1_TYPE(l1_high[i].raw) == L1_TYPE_SECTION_ENTRY &&
             l1_high[i].section.base_address == dev_section ) {
            return dev_virt + dev_offset;
        }

        // Otherwise, if it's free, map it. 
        if ( L1_TYPE(l1_high[i].raw) == L1_TYPE_INVALID_ENTRY ) {
            map_kernel_section_hi(dev_virt, make_dev_section(dev_base));
            cp15_invalidate_i_and_d_caches_fast();
            cp15_invalidate_tlb();
            return dev_virt + dev_offset;
        } 
    }
    // We're all out of section entries :-(
    panic("Ran out of section entries to map a kernel device");
}
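The walk above compares section numbers and rebuilds the mapped address from a section offset. A sketch of what the ARM_L1_SECTION_* macros plausibly expand to for 1MB sections (hypothetical, but consistent with how this example uses them):

/* Sketch: with 1MB sections, the section number is the address divided
 * by 1MB and the offset is the remainder. */
#define ARM_L1_SECTION_BITS       20
#define ARM_L1_SECTION_BYTES      (1u << ARM_L1_SECTION_BITS)  /* 1MB */
#define ARM_L1_SECTION_NUMBER(a)  ((uint32_t)(a) >> ARM_L1_SECTION_BITS)
#define ARM_L1_SECTION_OFFSET(a)  ((uint32_t)(a) & (ARM_L1_SECTION_BYTES - 1))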
Example #5
void paging_context_switch(lpaddr_t ttbr)
{
    assert(ttbr < MEMORY_OFFSET);
    //assert((ttbr & 0x3fff) == 0);

    lpaddr_t old_ttbr = cp15_read_ttbr0();
    if (ttbr != old_ttbr)
    {
        cp15_write_ttbr0(ttbr);
        cp15_invalidate_tlb();
        //this isn't necessary on gem5, since gem5 doesn't implement the cache
        //maintenance instructions, but ensures coherency by itself
        //cp15_invalidate_i_and_d_caches();
    }
}
Example #6
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for the kernel and memory space and
 * clears out all other mappings. Use this only at system bootup!
 */
void paging_arm_reset(lpaddr_t paddr, size_t bytes)
{
    // make sure kernel pagetable is aligned to 16K after relocation
    aligned_kernel_l1_table = (union arm_l1_entry *)ROUND_UP(
            (uintptr_t)kernel_l1_table, ARM_L1_ALIGN);

    // Re-map physical memory
    paging_map_memory((uintptr_t)aligned_kernel_l1_table, paddr, bytes);

    // map high-mem relocated exception vector to kernel section
    paging_map_kernel_section((uintptr_t)aligned_kernel_l1_table, ETABLE_ADDR,
                              PHYS_MEMORY_START);

    cp15_write_ttbr1(mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));
    cp15_invalidate_tlb();
}
Example #7
lvaddr_t paging_map_device(lpaddr_t device_base, size_t device_bytes)
{
    // HACK to put device in high memory.
    // Should likely track these allocations.
    static lvaddr_t dev_alloc = DEVICE_OFFSET;
    assert(device_bytes <= BYTES_PER_SECTION);
    dev_alloc -= BYTES_PER_SECTION;

    printf("paging_map_device_section: 0x%"PRIxLVADDR", 0x%"PRIxLVADDR", "
            "0x%"PRIxLPADDR".\n",
            (uintptr_t)aligned_kernel_l1_table, dev_alloc, device_base);

    paging_map_device_section((uintptr_t)aligned_kernel_l1_table, dev_alloc,
            device_base);

    cp15_write_ttbr1(mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();

    return dev_alloc;
}
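Unlike the variant in Example #4, this one returns the section-aligned virtual base rather than the address of the device itself, so the caller must add the device's offset within its section. A hypothetical usage sketch (the UART base address below is invented for illustration):

/* Hypothetical caller; 0x48020000 is a made-up device base. */
static volatile uint32_t *uart_regs;

void init_uart(void)
{
    lvaddr_t base = paging_map_device(0x48020000, 0x1000);
    uart_regs = (volatile uint32_t *)(base + ARM_L1_SECTION_OFFSET(0x48020000));
}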
Example #8
static errval_t
caps_map_l1(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count)
{
    //
    // Note:
    //
    // We have a chicken-and-egg problem in initializing resources, so
    // instead of treating an L2 table at its actual 1KB size, we treat
    // it as being 4KB. As a result, when we map an "L2" table we actually
    // map a page of memory as if it were 4 consecutive L2 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //
    const int ARM_L1_SCALE = 4;

    if (slot >= 1024) {
        printf("slot = %"PRIuCSLOT"\n",slot);
        panic("oops: slot id >= 1024");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count != 1) {
        printf("pte_count = %zu\n",(size_t)pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_ARM_l2) {
        //large page mapping goes here
        printf("kernel large page\n");
        //panic("oops: wrong src type");
        assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

        // ARM L1 has 4K entries, but we treat it as if it had 1K
        if (slot >= (256 * 4)) {
            panic("oops: slot >= (256 * 4)");
            return SYS_ERR_VNODE_SLOT_INVALID;
        }

        if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
            panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
            return SYS_ERR_WRONG_MAPPING;
        }

        // check offset within frame
        if ((offset + BYTES_PER_SECTION > get_size(src)) ||
            ((offset % BYTES_PER_SECTION) != 0)) {
            panic("oops: frame offset invalid");
            return SYS_ERR_FRAME_OFFSET_INVALID;
        }

        // check mapping does not overlap leaf page table
        if (slot + pte_count > (256 * 4)) {
            return SYS_ERR_VM_MAP_SIZE;
        }

        // Destination
        lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
        lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

        union arm_l1_entry* entry = (union arm_l1_entry*)dest_lvaddr + slot;
        if (entry->invalid.type != L1_TYPE_INVALID_ENTRY) {
            panic("Remapping valid page.");
        }

        lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
        if ((src_lpaddr & (LARGE_PAGE_SIZE - 1))) {
            panic("Invalid target");
        }

        struct cte *src_cte = cte_for_cap(src);
        src_cte->mapping_info.pte_count = pte_count;
        src_cte->mapping_info.pte = dest_lpaddr;
        src_cte->mapping_info.offset = offset;

        for (uintptr_t i = 0; i < pte_count; i++) {
            entry->raw = 0;

            entry->section.type = L1_TYPE_SECTION_ENTRY;
            entry->section.bufferable = 1;
            entry->section.cacheable = (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE)? 0: 1;
            entry->section.ap10 = (kpi_paging_flags & KPI_PAGING_FLAGS_READ)? 2:0;
            entry->section.ap10 |= (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE)? 3:0;
            entry->section.ap2 = 0;
            // the section descriptor holds PA[31:20], hence the shift by 20
            entry->section.base_address = (src_lpaddr + i * BYTES_PER_SECTION) >> 20;

            debug(SUBSYS_PAGING, "L1 section mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx32"\n",
                   dest_lvaddr, slot, entry, entry->raw);

            entry++;
        }

        // Flush TLB if remapping.
        cp15_invalidate_tlb();
        return SYS_ERR_OK;
    }
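For reference, here is a sketch of the L1 section descriptor layout that the field assignments above rely on, following the ARMv7-A short-descriptor format (the actual union arm_l1_entry lives in Barrelfish's headers and may name fields differently):

/* Sketch of an ARMv7-A short-descriptor L1 section entry; the union
 * name is hypothetical. */
union arm_l1_entry_sketch {
    uint32_t raw;
    struct {
        uint32_t type          :  2;  /* [1:0]   0b10 = section          */
        uint32_t bufferable    :  1;  /* [2]     B                       */
        uint32_t cacheable     :  1;  /* [3]     C                       */
        uint32_t execute_never :  1;  /* [4]     XN                      */
        uint32_t domain        :  4;  /* [8:5]                           */
        uint32_t impl          :  1;  /* [9]     implementation defined  */
        uint32_t ap10          :  2;  /* [11:10] AP[1:0]                 */
        uint32_t tex           :  3;  /* [14:12] TEX[2:0]                */
        uint32_t ap2           :  1;  /* [15]    AP[2]                   */
        uint32_t shareable     :  1;  /* [16]    S                       */
        uint32_t not_global    :  1;  /* [17]    nG                      */
        uint32_t supersection  :  1;  /* [18]    0 for a 1MB section     */
        uint32_t non_secure    :  1;  /* [19]    NS                      */
        uint32_t base_address  : 12;  /* [31:20] PA[31:20]               */
    } section;
};

Note that base_address holds PA[31:20], which is why the mapping loop shifts the source physical address right by 20 bits.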
Example #9
/**
 * Create initial (temporary) page tables.
 *
 * We use 1MB (ARM_L1_SECTION_BYTES) pages (sections) with a single-level table.
 * This covers 1MB * 4096 entries (ARM_L1_MAX_ENTRIES) = 4GB per page table.
 *
 * Hardware details can be found in:
 * ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition
 *   B3: Virtual Memory System Architecture (VMSA)
 */
void paging_init(void)
{
    /**
     * Make sure our page tables are correctly aligned in memory
     */
    assert(ROUND_UP((lpaddr_t)l1_low, ARM_L1_ALIGN) == (lpaddr_t)l1_low);
    assert(ROUND_UP((lpaddr_t)l1_high, ARM_L1_ALIGN) == (lpaddr_t)l1_high);

    /**
     * On ARMv7-A, the base of physical RAM (PHYS_MEMORY_START) is the same
     * as the offset at which physical memory is mapped into the virtual
     * address space (MEMORY_OFFSET).
     */
    STATIC_ASSERT(MEMORY_OFFSET == PHYS_MEMORY_START, "");

    /**
     * Zero the page tables: this has the effect of marking every PTE
     * as invalid.
     */
    memset(&l1_low,  0, sizeof(l1_low));
    memset(&l1_high, 0, sizeof(l1_high));
    memset(&l2_vec,  0, sizeof(l2_vec));

    /**
     * Now we lay out the kernel's virtual address space.
     *
     * 00000000-7FFFFFFF: 1-1 mappings (hardware we have not mapped
     *                    into high kernel space yet)
     * 80000000-BFFFFFFF: 1-1 mappings (this is 1GB of RAM)
     * C0000000-FEFFFFFF: On-demand mappings of hardware devices,
     *                    allocated descending from DEVICE_OFFSET.
     * FF000000-FFEFFFFF: Unallocated.
     * FFF00000-FFFFFFFF: L2 table, containing:
     *      FFF00000-FFFEFFFF: Unallocated
     *      FFFF0000-FFFFFFFF: Exception vectors
     */
    lvaddr_t base = 0;
    size_t i;
    for (i=0, base = 0; i < ARM_L1_MAX_ENTRIES/2; i++) {
        map_kernel_section_lo(base, make_dev_section(base));
        base += ARM_L1_SECTION_BYTES;
    }
    for (i=0, base = MEMORY_OFFSET; i < ARM_L1_MAX_ENTRIES/4; i++) {
        map_kernel_section_hi(base, make_ram_section(base));
        base += ARM_L1_SECTION_BYTES;
    }

    /* Map the exception vectors. */
    map_vectors();

    /**
     * TTBCR: Translation Table Base Control register.
     *  TTBCR.N is bits[2:0]
     * On a TLB miss, TTBCR.N determines whether TTBR0 or TTBR1 is used as the
     * base address for the translation table walk in memory:
     *  N == 0 -> always use TTBR0
     *  N >  0 -> if VA[31:32-N] > 0 use TTBR1 else use TTBR0
     *
     * TTBR0 is typically used for process-specific addresses
     * TTBR1 is typically used for OS addresses that do not change on context
     *       switch
     *
     * set TTBCR.N = 1 to use TTBR1 for VAs >= MEMORY_OFFSET (=2GB)
     */
    assert(mmu_enabled == false);
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();
    cp15_write_ttbr1((lpaddr_t)l1_high);
    cp15_write_ttbr0((lpaddr_t)l1_low);
    #define TTBCR_N 1
    uint32_t ttbcr = cp15_read_ttbcr();
    ttbcr =  (ttbcr & ~7) | TTBCR_N;
    cp15_write_ttbcr(ttbcr);
    STATIC_ASSERT(1UL<<(32-TTBCR_N) == MEMORY_OFFSET, "");
    #undef TTBCR_N
    cp15_enable_mmu();
    cp15_enable_alignment();
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();
    mmu_enabled = true;
}
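As a quick sanity check of the TTBR-select rule described in the comment above, the hypothetical helper below simply restates it and tests the 2GB split that TTBCR.N = 1 produces:

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper: TTBR1 is used when any of VA[31:32-N] is non-zero. */
static int walk_uses_ttbr1(uint32_t va, unsigned n)
{
    return n != 0 && (va >> (32 - n)) != 0;
}

int main(void)
{
    /* With TTBCR.N = 1 the split falls at 2GB == MEMORY_OFFSET. */
    assert(!walk_uses_ttbr1(0x7FFFFFFFu, 1));  /* low half  -> TTBR0 */
    assert( walk_uses_ttbr1(0x80000000u, 1));  /* high half -> TTBR1 */
    return 0;
}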