Example #1
0
/**
 * \brief Map a device into the kernel's address space.
 *
 * \param dev_base is the physical address of the device
 * \param dev_size is the number of bytes of physical address space
 * the device occupies.
 *
 * \return the kernel virtual address of the mapped device, or panic.
 */
lvaddr_t paging_map_device(lpaddr_t dev_base, size_t dev_size)
{
    /* Hardware devices are mapped with 1MB sections in the top quarter
     * (0xC0000000-0xFE000000) of the kernel's address space, just below
     * the exception vectors.
     *
     * Sections make sense here because (1) the CPU driver maps only a
     * handful of devices anyway, and (2) even if it mapped more, the
     * larger mapping granularity might save a wee bit of TLB space.
     */

    /* Refuse any device whose range would straddle a section boundary:
     * we only ever install a single section entry per device. */
    uint32_t dev_section = ARM_L1_SECTION_NUMBER(dev_base);
    uint32_t end_section = ARM_L1_SECTION_NUMBER(dev_base + dev_size - 1);
    if (dev_section != end_section) {
        panic("Attempt to map device spanning >1 section 0x%"PRIxLPADDR"+0x%x\n",
              dev_base, dev_size );
    }

    /* Walk the device region of the L1 table from the top down, looking
     * for either (a) an existing mapping of this section, in which case
     * return the address the device is already mapped at, or (b) an
     * invalid entry, in which case install a fresh mapping there. */
    uint32_t dev_offset = ARM_L1_SECTION_OFFSET(dev_base);

    for (size_t idx = ARM_L1_OFFSET(DEVICE_OFFSET - 1);
         idx > ARM_L1_MAX_ENTRIES / 4 * 3;
         idx--) {

        // Virtual address covered by L1 entry 'idx'.
        lvaddr_t va = (lvaddr_t)(idx << ARM_L1_SECTION_BITS);

        // Already mapped to this device's section?  Reuse it.
        if (L1_TYPE(l1_high[idx].raw) == L1_TYPE_SECTION_ENTRY &&
            l1_high[idx].section.base_address == dev_section) {
            return va + dev_offset;
        }

        // First free slot: create the mapping and flush caches/TLB so
        // the new translation is visible.
        if (L1_TYPE(l1_high[idx].raw) == L1_TYPE_INVALID_ENTRY) {
            map_kernel_section_hi(va, make_dev_section(dev_base));
            cp15_invalidate_i_and_d_caches_fast();
            cp15_invalidate_tlb();
            return va + dev_offset;
        }
    }
    // We're all out of section entries :-(
    panic("Ran out of section entries to map a kernel device");
}
Example #2
0
/**
 * \brief Map a device into high kernel memory, one full section per call.
 *
 * \param device_base  physical base address of the device
 * \param device_bytes size of the device's register window in bytes;
 *                     must fit within a single section
 *
 * \return the kernel virtual address the device was mapped at.
 */
lvaddr_t paging_map_device(lpaddr_t device_base, size_t device_bytes)
{
    // HACK to put device in high memory.
    // Should likely track these allocations.
    static lvaddr_t dev_alloc = DEVICE_OFFSET;

    // Each device consumes a whole section, allocated top-down from
    // DEVICE_OFFSET; mappings are never reclaimed.
    assert(device_bytes <= BYTES_PER_SECTION);
    dev_alloc -= BYTES_PER_SECTION;

    uintptr_t l1_table = (uintptr_t)aligned_kernel_l1_table;

    printf("paging_map_device_section: 0x%"PRIxLVADDR", 0x%"PRIxLVADDR", "
            "0x%"PRIxLPADDR".\n",
            l1_table, dev_alloc, device_base);

    // Install the section mapping, then make it take effect: reload
    // TTBR1 with the table's physical address and flush caches and TLB.
    paging_map_device_section(l1_table, dev_alloc, device_base);

    cp15_write_ttbr1(mem_to_local_phys(l1_table));
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();

    return dev_alloc;
}
Example #3
0
/**
 * Create initial (temporary) page tables.
 *
 * We use 1MB (ARM_L1_SECTION_BYTES) pages (sections) with a single-level
 * table.  This allows 1MB * 4096 (ARM_L1_MAX_ENTRIES) entries = 4GB of
 * address space per page table.
 *
 * Hardware details can be found in:
 * ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition
 *   B3: Virtual Memory System Architecture (VMSA)
 */
void paging_init(void)
{
    /**
     * Make sure our page tables are correctly aligned in memory
     * (the MMU requires L1 tables to be ARM_L1_ALIGN-aligned).
     */
    assert(ROUND_UP((lpaddr_t)l1_low, ARM_L1_ALIGN) == (lpaddr_t)l1_low);
    assert(ROUND_UP((lpaddr_t)l1_high, ARM_L1_ALIGN) == (lpaddr_t)l1_high);

    /**
     * On ARMv7-A, physical RAM (PHYS_MEMORY_START) is the same as the
     * offset of mapped physical memory within the virtual address space
     * (MEMORY_OFFSET) — the RAM mapping below relies on this identity.
     */
    STATIC_ASSERT(MEMORY_OFFSET == PHYS_MEMORY_START, "");

    /**
     * Zero the page tables: this has the effect of marking every PTE
     * as invalid.
     */
    memset(&l1_low,  0, sizeof(l1_low));
    memset(&l1_high, 0, sizeof(l1_high));
    memset(&l2_vec,  0, sizeof(l2_vec));

    /**
     * Now we lay out the kernel's virtual address space.
     *
     * 00000000-7FFFFFFF: 1-1 mappings (hardware we have not mapped
     *                    into high kernel space yet)
     * 80000000-BFFFFFFF: 1-1 mappings (this is 1GB of RAM)
     * C0000000-FEFFFFFF: On-demand mappings of hardware devices,
     *                    allocated descending from DEVICE_OFFSET.
     * FF000000-FFEFFFFF: Unallocated.
     * FFF00000-FFFFFFFF: L2 table, containing:
     *      FFF00000-FFFEFFFF: Unallocated
     *      FFFF0000-FFFFFFFF: Exception vectors
     */
    lvaddr_t base = 0;
    size_t i;
    // Lower half (handled via TTBR0/l1_low): identity-map everything as
    // device memory for now; RAM is remapped properly below.
    for (i=0, base = 0; i < ARM_L1_MAX_ENTRIES/2; i++) {
        map_kernel_section_lo(base, make_dev_section(base));
        base += ARM_L1_SECTION_BYTES;
    }
    // Upper table (TTBR1/l1_high): map 1GB of RAM starting at
    // MEMORY_OFFSET with RAM (cacheable) section attributes.
    for (i=0, base = MEMORY_OFFSET; i < ARM_L1_MAX_ENTRIES/4; i++) {
        map_kernel_section_hi(base, make_ram_section(base));
        base += ARM_L1_SECTION_BYTES;
    }

    /* Map the exception vectors. */
    map_vectors();

    /**
     * TTBCR: Translation Table Base Control register.
     *  TTBCR.N is bits[2:0]
     * On a TLB miss TTBCR.N determines whether TTBR0 or TTBR1 is used as
     * the base address for the translation table walk in memory:
     *  N == 0 -> always use TTBR0
     *  N >  0 -> if VA[31:32-N] > 0 use TTBR1 else use TTBR0
     *
     * TTBR0 is typically used for process-specific addresses
     * TTBR1 is typically used for OS addresses that do not change on
     *       context switch
     *
     * set TTBCR.N = 1 to use TTBR1 for VAs >= MEMORY_OFFSET (=2GB)
     *
     * NOTE: the order of the operations below is significant — the base
     * registers and TTBCR must be programmed, and the caches/TLB
     * invalidated, before the MMU is enabled.
     */
    assert(mmu_enabled == false);
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();
    // MMU is still off, so the tables' virtual addresses are their
    // physical addresses here (identity/offset mapping assumption above).
    cp15_write_ttbr1((lpaddr_t)l1_high);
    cp15_write_ttbr0((lpaddr_t)l1_low);
    #define TTBCR_N 1
    uint32_t ttbcr = cp15_read_ttbcr();
    // Replace only the low 3 bits (TTBCR.N), preserving the rest.
    ttbcr =  (ttbcr & ~7) | TTBCR_N;
    cp15_write_ttbcr(ttbcr);
    // Sanity-check that N=1 puts the TTBR0/TTBR1 split exactly at
    // MEMORY_OFFSET (2GB).
    STATIC_ASSERT(1UL<<(32-TTBCR_N) == MEMORY_OFFSET, "");
    #undef TTBCR_N
    cp15_enable_mmu();
    cp15_enable_alignment();
    // Flush again now that translation is active.
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();
    mmu_enabled = true;
}