Example #1
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for the kernel and the memory space and
 * clears out all other mappings. Use this only at system bootup!
 */
void paging_arm_reset(lpaddr_t paddr, size_t bytes)
{
    // make sure kernel pagetable is aligned to 16K after relocation
    aligned_kernel_l1_table = (union arm_l1_entry *)ROUND_UP((uintptr_t)kernel_l1_table, ARM_L1_ALIGN);

    // make sure low l2 pagetable is aligned to 1K after relocation
    aligned_low_l2_table = (union arm_l2_entry *)ROUND_UP((uintptr_t)low_l2_table, ARM_L2_ALIGN);

    // Re-map physical memory
    paging_map_memory((uintptr_t)aligned_kernel_l1_table, paddr, bytes);

    // map first MB at granularity of 4K pages
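    // (ARM_L2_SMALL_USR_NONE: no user access; cacheable and bufferable)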
    uint32_t l2_flags = ARM_L2_SMALL_USR_NONE | ARM_L2_SMALL_CACHEABLE | ARM_L2_SMALL_BUFFERABLE;
    paging_map_user_pages_l1((uintptr_t)aligned_kernel_l1_table, MEMORY_OFFSET,
                             mem_to_local_phys((uintptr_t)aligned_low_l2_table));
    for(lpaddr_t pa=0; pa < ARM_L1_SECTION_BYTES; pa += BYTES_PER_PAGE)
    {
        lvaddr_t va = pa + MEMORY_OFFSET;
        paging_set_l2_entry((uintptr_t *)&aligned_low_l2_table[ARM_L2_OFFSET(va)], pa, l2_flags);
    }

    // map high-mem relocated exception vector to corresponding page in low MB
    // core 0: 0xffff0000 -> 0x80000
    // core 1: 0xffff0000 -> 0x81000
    // ...
    paging_map_user_pages_l1((uintptr_t)aligned_kernel_l1_table, ETABLE_ADDR,
            mem_to_local_phys((uintptr_t)aligned_low_l2_table));
    int core_id = hal_get_cpu_id();
    lpaddr_t addr = ETABLE_PHYS_BASE + core_id * BASE_PAGE_SIZE;
    paging_set_l2_entry((uintptr_t *)&aligned_low_l2_table[ARM_L2_OFFSET(ETABLE_ADDR)], addr, l2_flags);

    cp15_write_ttbr1(mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));

    cp15_invalidate_tlb();
}
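The 16 KiB and 1 KiB alignment above relies on ROUND_UP rounding an address up to a power-of-two boundary. A minimal sketch of such a macro, assuming the usual mask-based form (an illustration, not necessarily the definition used in this tree):

/* Hypothetical helper: round x up to the next multiple of the power-of-two
 * alignment a (e.g. ARM_L1_ALIGN = 16 KiB for the L1 table, ARM_L2_ALIGN = 1 KiB
 * for a coarse L2 table). Only valid when a is a power of two. */
#define ROUND_UP(x, a)  ((((uintptr_t)(x)) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))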
Example #2
/**
 * \brief Initialize page tables.
 *
 * This includes setting up page tables for the init process.
 */
static void init_page_tables(void)
{
    // Create page table for init
    if(hal_cpu_is_bsp()) {
        init_l1 =  (union arm_l1_entry *)local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    } else {
        init_l1 =  (union arm_l1_entry *)local_phys_to_mem(app_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)local_phys_to_mem(app_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    }

    printf("init_page_tables done: init_l1=%p init_l2=%p\n",
            init_l1, init_l2);

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * ARM has:
     *
     * L1 has 4096 entries (16KB).
     * L2 Coarse has 256 entries (256 * 4B = 1KB).
     *
     * CPU driver currently fakes having 1024 entries in L1 and
     * L2 with 1024 entries by treating a page as 4 consecutive
 * L2 tables and mapping this as a unit in L1 (a short sketch of the
 * resulting address arithmetic follows this function).
     */
    caps_create_new(ObjType_VNode_ARM_l1,
                    mem_to_local_phys((lvaddr_t)init_l1),
                    vnode_objbits(ObjType_VNode_ARM_l1), 0,
                    caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
                    );

    //STARTUP_PROGRESS();

    // Map L2 into successive slots in pagecn
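    // (one VNode_ARM_l2 capability per 4 KiB page; each page backs four 1 KiB coarse tables)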
    size_t i;
    for (i = 0; i < INIT_L2_BYTES / BASE_PAGE_SIZE; i++) {
        size_t objbits_vnode = vnode_objbits(ObjType_VNode_ARM_l2);
        assert(objbits_vnode == BASE_PAGE_BITS);
        caps_create_new(
                        ObjType_VNode_ARM_l2,
                        mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
                        objbits_vnode, 0,
                        caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
                        );
    }

    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    STATIC_ASSERT(0 == (INIT_VBASE % ARM_L1_SECTION_BYTES), "");
    for (lvaddr_t vaddr = INIT_VBASE;
         vaddr < INIT_SPACE_LIMIT;
         vaddr += ARM_L1_SECTION_BYTES) {
        uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES;
        uintptr_t l2_off = section * ARM_L2_TABLE_BYTES;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
    }

    printf("Calling paging_context_switch with address = %"PRIxLVADDR"\n",
           mem_to_local_phys((lvaddr_t) init_l1));
    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));
}
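The "fake 1024-entry tables" described in the comment above come down to simple address arithmetic: every 4 KiB page of init_l2 holds four consecutive 1 KiB coarse tables, one per 1 MiB section. A sketch of that arithmetic, restating the wiring loop above (it assumes ARM_L1_SECTION_BYTES = 1 MiB, ARM_L2_TABLE_BYTES = 1 KiB and BASE_PAGE_SIZE = 4 KiB):

/* Sketch only: physical address of the 1 KiB coarse L2 table that backs the
 * 1 MiB section containing vaddr, given the physical base of init's L2 area.
 * Mirrors the L1-to-L2 wiring loop in init_page_tables(). */
static inline lpaddr_t init_l2_table_phys(lvaddr_t vaddr, lpaddr_t init_l2_phys)
{
    uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES; /* 1 MiB section index */
    return init_l2_phys + section * ARM_L2_TABLE_BYTES;              /* four tables share one 4 KiB page */
}

Four consecutive section indices resolve into the same 4 KiB page, which is why the loop over INIT_L2_BYTES / BASE_PAGE_SIZE above creates exactly one VNode_ARM_l2 capability per page.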
Example #3
spawn_init(const char*      name,
           int32_t          kernel_id,
           const uint8_t*   initrd_base,
           size_t           initrd_bytes)
{
    assert(0 == kernel_id);

    // Create page table for init

    init_l1 =  (uintptr_t*)alloc_mem_aligned(INIT_L1_BYTES, ARM_L1_ALIGN);
    memset(init_l1, 0, INIT_L1_BYTES);

    init_l2 = (uintptr_t*)alloc_mem_aligned(INIT_L2_BYTES, ARM_L2_ALIGN);
    memset(init_l2, 0, INIT_L2_BYTES);

    STARTUP_PROGRESS();

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    STARTUP_PROGRESS();

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
    const char *argv[] = { "init", bootinfochar };

    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(&spawn_state, name,
                                        ARRAY_LENGTH(argv), argv,
                                        bootinfo_phys, INIT_ARGS_VBASE,
                                        alloc_phys, &paramaddr);

    STARTUP_PROGRESS();

    /*
     * Create a capability that allows user-level applications to
     * access device memory. This capability will be passed to Kaluga,
 * split up into smaller pieces and distributed among device
     * drivers.
     *
     * For armv5, this is currently a dummy capability. We do not
     * have support for user-level device drivers in gem5 yet, so we
     * do not allocate any memory as device memory. Some cap_copy
     * operations in the bootup code fail if this capability is not
     * present.
     */
    struct cte *iocap = caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_IO);
    errval_t  err = caps_create_new(ObjType_IO, 0, 0, 0, my_core_id, iocap);
    assert(err_is_ok(err));

    struct dispatcher_shared_generic *disp
        = get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_arm *disp_arm
        = get_dispatcher_shared_arm(init_dcb->disp);
    assert(NULL != disp);

    STARTUP_PROGRESS();

    /* Initialize dispatcher */
    disp->udisp = INIT_DISPATCHER_VBASE;

    STARTUP_PROGRESS();
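    // init's VSpace root is the physical address of its L1 page table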
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l1);

    STARTUP_PROGRESS();

    /* Page table setup */

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * ARM has:
     *
     * L1 has 4096 entries (16KB).
     * L2 Coarse has 256 entries (256 * 4B = 1KB).
     *
     * CPU driver currently fakes having 1024 entries in L1 and
     * L2 with 1024 entries by treating a page as 4 consecutive
     * L2 tables and mapping this as a unit in L1.
     */
    caps_create_new(
        ObjType_VNode_ARM_l1,
        mem_to_local_phys((lvaddr_t)init_l1),
        vnode_objbits(ObjType_VNode_ARM_l1), 0,
        my_core_id,
        caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );

    STARTUP_PROGRESS();

    // Map L2 into successive slots in pagecn
    size_t i;
    for (i = 0; i < INIT_L2_BYTES / BASE_PAGE_SIZE; i++) {
        size_t objbits_vnode = vnode_objbits(ObjType_VNode_ARM_l2);
        assert(objbits_vnode == BASE_PAGE_BITS);
        caps_create_new(
            ObjType_VNode_ARM_l2,
            mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
            objbits_vnode, 0,
            my_core_id,
            caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
            );
    }

    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    STATIC_ASSERT(0 == (INIT_VBASE % ARM_L1_SECTION_BYTES), "");
    for (lvaddr_t vaddr = INIT_VBASE; vaddr < INIT_SPACE_LIMIT; vaddr += ARM_L1_SECTION_BYTES)
    {
        uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES;
        uintptr_t l2_off = section * ARM_L2_TABLE_BYTES;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
    }

    paging_make_good((lvaddr_t)init_l1, INIT_L1_BYTES);

    STARTUP_PROGRESS();

    printf("XXX: Debug print to make Bram's code work\n");

    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));

    STARTUP_PROGRESS();

    // Map cmdline arguments in VSpace at ARGS_BASE
    STATIC_ASSERT(0 == (ARGS_SIZE % BASE_PAGE_SIZE), "");

    STARTUP_PROGRESS();

    spawn_init_map(init_l2, INIT_VBASE, INIT_ARGS_VBASE,
                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);

    STARTUP_PROGRESS();

    // Map bootinfo
    spawn_init_map(init_l2, INIT_VBASE, INIT_BOOTINFO_VBASE,
                   bootinfo_phys, BOOTINFO_SIZE, INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    genvaddr_t init_ep, got_base;
    load_init_image(&l2_info, initrd_base, initrd_bytes, &init_ep, &got_base);

    // Set startup arguments (argc, argv)
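    // r0 carries the argument-page address, rtls the dispatcher/TLS base and r10 the
    // GOT base; the CPSR starts init in user mode with FIQs masked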
    disp_arm->enabled_save_area.named.r0   = paramaddr;
    disp_arm->enabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
    disp_arm->enabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    disp_arm->enabled_save_area.named.r10  = got_base;

    disp_arm->got_base = got_base;

    struct bootinfo* bootinfo = (struct bootinfo*)INIT_BOOTINFO_VBASE;
    bootinfo->regions_length = 0;

    STARTUP_PROGRESS();

    create_modules_from_initrd(bootinfo, initrd_base, initrd_bytes);
    debug(SUBSYS_STARTUP, "used %"PRIuCSLOT" slots in modulecn\n", spawn_state.modulecn_slot);

    STARTUP_PROGRESS();
    create_phys_caps(&spawn_state.physaddrcn->cap, bootinfo);

    STARTUP_PROGRESS();

    bootinfo->mem_spawn_core  = ~0;     // Size of kernel if bringing up others

    // Map dispatcher
    spawn_init_map(init_l2, INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
                   INIT_PERM_RW);

    STARTUP_PROGRESS();

    // NB libbarrelfish initialization sets up the stack.
    disp_arm->disabled_save_area.named.pc   = init_ep;
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
    disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    disp_arm->disabled_save_area.named.r10  = got_base;

#ifdef __XSCALE__
    cp15_disable_cache();
#endif

    printf("Kernel ready.\n");

    pit_start();

    // On to userland...
    STARTUP_PROGRESS();
    dispatch(init_dcb);

    panic("Not reached.");
}