Example #1
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for kernel and memory-space. It clears out
 * all other mappings. Use this only at system bootup!
 */
void paging_arm_reset(lpaddr_t paddr, size_t bytes)
{
    // make sure kernel pagetable is aligned to 16K after relocation
    aligned_kernel_l1_table = (union arm_l1_entry *)ROUND_UP((uintptr_t)kernel_l1_table, ARM_L1_ALIGN);

    // make sure low l2 pagetable is aligned to 1K after relocation
    aligned_low_l2_table = (union arm_l2_entry *)ROUND_UP((uintptr_t)low_l2_table, ARM_L2_ALIGN);

    // Re-map physical memory
    paging_map_memory((uintptr_t)aligned_kernel_l1_table, paddr, bytes);

    // map first MB at granularity of 4K pages
    uint32_t l2_flags = ARM_L2_SMALL_USR_NONE | ARM_L2_SMALL_CACHEABLE | ARM_L2_SMALL_BUFFERABLE;
    paging_map_user_pages_l1((uintptr_t)aligned_kernel_l1_table, MEMORY_OFFSET,
                             mem_to_local_phys((uintptr_t)aligned_low_l2_table));
    for(lpaddr_t pa=0; pa < ARM_L1_SECTION_BYTES; pa += BYTES_PER_PAGE)
    {
        lvaddr_t va = pa + MEMORY_OFFSET;
        paging_set_l2_entry((uintptr_t *)&aligned_low_l2_table[ARM_L2_OFFSET(va)], pa, l2_flags);
    }

    // map high-mem relocated exception vector to corresponding page in low MB
    // core 0: 0xffff0000 -> 0x80000
    // core 1: 0xffff0000 -> 0x81000
    // ...
    paging_map_user_pages_l1((uintptr_t)aligned_kernel_l1_table, ETABLE_ADDR,
            mem_to_local_phys((uintptr_t)aligned_low_l2_table));
    int core_id = hal_get_cpu_id();
    lpaddr_t addr = ETABLE_PHYS_BASE + core_id * BASE_PAGE_SIZE;
    paging_set_l2_entry((uintptr_t *)&aligned_low_l2_table[ARM_L2_OFFSET(ETABLE_ADDR)], addr, l2_flags);

    cp15_write_ttbr1(mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));

    cp15_invalidate_tlb();
}
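The table pointers above are aligned with ROUND_UP before use. A minimal sketch of such a power-of-two round-up helper (the macro is assumed here; Barrelfish's actual definition may differ):

#include <assert.h>
#include <stdint.h>

/* Sketch only: round x up to the next multiple of align, where align
 * must be a power of two (e.g. ARM_L1_ALIGN, a 16 KB boundary). */
#define ROUND_UP(x, align)  (((x) + (align) - 1) & ~(uintptr_t)((align) - 1))

int main(void)
{
    assert(ROUND_UP(0x4001, 0x4000) == 0x8000);  /* rounds up to next 16K */
    assert(ROUND_UP(0x8000, 0x4000) == 0x8000);  /* already aligned */
    return 0;
}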
Example #2
static struct dcb *spawn_init_common(const char *name,
                                     int argc, const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys,
                                     alloc_phys_aligned_func alloc_phys_aligned)
{
    struct dispatcher_shared_generic *disp;
    struct dispatcher_shared_aarch64 *disp_aarch64;

    MSG("spawn_init_common %s\n", name);

    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(&spawn_state, name, argc, argv,
                                        bootinfo_phys, INIT_ARGS_VBASE,
                                        alloc_phys, alloc_phys_aligned,
                                        &paramaddr);
    /* initialize page tables */
    init_page_tables();

    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l0);

    spawn_init_map(init_l3, ARMV8_INIT_VBASE, INIT_ARGS_VBASE,
                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);

    /* Map dispatcher */
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
                   INIT_PERM_RW);

    disp = get_dispatcher_shared_generic(init_dcb->disp);
    disp_aarch64 = get_dispatcher_shared_aarch64(init_dcb->disp);

    /* Initialize dispatcher */
    disp->disabled = true;
    strncpy(disp->name, argv[0], DISP_NAME_LEN);

    /* Tell init the vspace addr of its dispatcher. */
    disp->udisp = INIT_DISPATCHER_VBASE;

    /* TODO: write the context ID for init */

    /* Set the thread ID register to point to the shared structure. */

    disp_aarch64->enabled_save_area.named.x0   = paramaddr;
    disp_aarch64->enabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_I_MASK;
    sysreg_write_tpidrro_el0((uint64_t)disp->udisp);

    dump_dispatcher(disp);

    return init_dcb;
}
Example #3
struct sysret sys_handle_kcb_identify(struct capability* to)
{
    // Return with physical base address of frame
    // XXX: pack size into bottom bits of base address
    assert(to->type == ObjType_KernelControlBlock);
    lvaddr_t vkcb = (lvaddr_t) to->u.kernelcontrolblock.kcb;
    assert((vkcb & BASE_PAGE_MASK) == 0);

    return (struct sysret) {
        .error = SYS_ERR_OK,
        .value = mem_to_local_phys(vkcb) | OBJBITS_KCB,
    };
}
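Since the KCB's physical base is page-aligned (see the assert above), the size bits packed into the low bits can be recovered by the caller. A hedged sketch of the unpacking, assuming OBJBITS_KCB fits below BASE_PAGE_MASK:

/* Sketch: unpack the value returned by sys_handle_kcb_identify. */
struct sysret ret = sys_handle_kcb_identify(to);
lpaddr_t kcb_base = ret.value & ~((lpaddr_t)BASE_PAGE_MASK);  /* physical base */
uint8_t  kcb_bits = ret.value & BASE_PAGE_MASK;               /* == OBJBITS_KCB */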

struct sysret sys_get_absolute_time(void)
{
    // Return kernel_now.
    // XXX: this may not provide all the properties of absolute time we want,
    // but should be sufficient to implement stuff that needs timing with 1/10
    // of a second accuracy range.
    return (struct sysret) {
        .error = SYS_ERR_OK,
        .value = kernel_now + kcb_current->kernel_off,
    };
}
Example #4
lvaddr_t paging_x86_32_map_special(lpaddr_t base, size_t size, uint64_t bitmap)
{
    // Allocate backwards from a page below end of address space
    static lvaddr_t vbase = (lvaddr_t)X86_32_VADDR_SPACE_SIZE;
    lpaddr_t addr;
    lvaddr_t vaddr;

    paging_align(&vbase, &base, &size, X86_32_MEM_PAGE_SIZE);

    // Align physical base address
    lpaddr_t offset = base & (X86_32_MEM_PAGE_SIZE - 1);
    base -= offset;

    if(vbase - size < X86_32_VADDR_SPACE_SIZE - X86_32_DEVICE_SPACE_LIMIT) {
        return 0;
    }

    // Map pages, tables and directories (reverse order)
    for(vaddr = vbase - X86_32_MEM_PAGE_SIZE,
            addr = base + size - X86_32_MEM_PAGE_SIZE;
        vaddr >= vbase - size;
        vaddr -= X86_32_MEM_PAGE_SIZE, addr -= X86_32_MEM_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_pdpte_entry *pdpte_base = &pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_ptable_entry *pdir_base =
            &mem_pdir[X86_32_PDPTE_BASE(mem_to_local_phys(vaddr))][X86_32_PDIR_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 2M device page: vaddr = 0x%x, addr = 0x%x, "
              "PDPTE_BASE = %u, PDIR_BASE = %u -- ", vaddr,
              addr, X86_32_PDPTE_BASE(vaddr), X86_32_PDIR_BASE(vaddr));
        mapit(pdpte_base, pdir_base, addr, bitmap);
#else
#       ifdef CONFIG_PSE
        union x86_32_ptable_entry *pdir_base = &pdir[X86_32_PDIR_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4M device page: vaddr = 0x%x, addr = 0x%x, "
              "PDIR_BASE = %u -- ", vaddr, addr, X86_32_PDIR_BASE(vaddr));
        mapit(pdir_base, addr, bitmap);
#       else
        union x86_32_pdir_entry *pdir_base = &pdir[X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &mem_ptable[X86_32_PDIR_BASE(vaddr) - (X86_32_PTABLE_SIZE - MEM_PTABLE_SIZE)][X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K device page: vaddr = 0x%"PRIxLVADDR", "
              "addr = 0x%"PRIxLPADDR", "
              "PDIR_BASE = %"PRIxLPADDR", PTABLE_BASE = %"PRIxLPADDR", pdir = %p, ptable = %p -- ",
              vaddr, addr, X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr), pdir,
              mem_ptable[X86_32_PDIR_BASE(vaddr) - (X86_32_PTABLE_SIZE - MEM_PTABLE_SIZE)]);
        mapit(pdir_base, ptable_base, addr, bitmap);
#       endif
#endif
    }

    vbase -= size;
    return vbase + offset;
}
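Because the sub-page offset stripped from the physical base is added back onto the returned vaddr, callers may pass an unaligned register address directly. A usage sketch (the address and flag value below are illustrative, not from the original code):

/* Hypothetical: map a 4-byte MMIO register at an unaligned physical address. */
uint64_t bitmap   = 0;                 /* caching/permission flags, illustrative */
lpaddr_t reg_phys = 0xFEC00020;        /* hypothetical device register */
lvaddr_t reg_virt = paging_x86_32_map_special(reg_phys, sizeof(uint32_t), bitmap);
if (reg_virt == 0) {
    panic("out of device address space");
}
volatile uint32_t *reg = (volatile uint32_t *)reg_virt;  /* offset already applied */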
Example #5
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for kernel and memory-space. It clears out
 * all other mappings. Use this only at system bootup!
 */
void paging_x86_32_reset(void)
{
    // Re-map physical memory
    // XXX: Map in what we get from Multiboot. We should actually map
    // stuff dynamically, whenever raw mem gets retyped into a kernel
    // object
/*     if(paging_map_memory(0, multiboot_info->mem_upper * 1024 + 0x100000) */
    lpaddr_t lpaddr = gen_phys_to_local_phys(X86_32_PADDR_SPACE_LIMIT -
                                             X86_32_DEVICE_SPACE_LIMIT);
    if(paging_x86_32_map_memory(0, lpaddr) != 0) {
        panic("error while mapping physical memory!");
    }

    // Switch to new page layout
#ifdef CONFIG_PAE
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)pdpte));
#else
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)pdir));
#endif
}
Example #6
File: paging.c  Project: joe9/barrelfish
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for kernel and memory-space. It clears out
 * all other mappings. Use this only at system bootup!
 */
void
paging_k1om_reset(void)
{
    // Map kernel image so we don't lose ground
    if (paging_k1om_map_memory(mem_to_local_phys((lvaddr_t) &_start_kernel),
                               SIZE_KERNEL_IMAGE)
        != 0) {
        panic("error while mapping physical memory!");
    }

    // Map an initial amount of memory
    if (paging_k1om_map_memory(0, K1OM_KERNEL_INIT_MEMORY) != 0) {
        panic("error while mapping physical memory!");
    }

    if (paging_k1om_map_memory(XEON_PHI_SBOX_BASE, XEON_PHI_SBOX_SIZE) != 0) {
        panic("error while mapping physical memory!");
    }

    // Switch to new page layout
    paging_k1om_context_switch(mem_to_local_phys((lvaddr_t) pml4));
}
Example #7
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for kernel and memory-space. It clears out
 * all other mappings. Use this only at system bootup!
 */
void paging_x86_64_reset(void)
{
    // Map kernel image so we don't lose ground
    if(paging_x86_64_map_memory(mem_to_local_phys((lvaddr_t)&_start_kernel),
                                SIZE_KERNEL_IMAGE) != 0) {
        panic("error while mapping physical memory!");
    }

    // Map an initial amount of memory
    if(paging_x86_64_map_memory(0, KERNEL_INIT_MEMORY) != 0) {
        panic("error while mapping physical memory!");
    }
    
#ifdef __k1om__
    /* mapping the Xeon Phi SBOX registers to provide serial input */
    if (paging_x86_64_map_memory(XEON_PHI_SBOX_BASE, XEON_PHI_SBOX_SIZE) != 0) {
        panic("error while mapping physical memory!");
    }
#endif

    // Switch to new page layout
    paging_x86_64_context_switch(mem_to_local_phys((lvaddr_t)pml4));
}
Example #8
static inline void mapit(union x86_64_pdir_entry *pml4_base,
                         union x86_64_pdir_entry *pdpt_base,
                         union x86_64_ptable_entry *pdir_base, lpaddr_t addr,
                         uint64_t bitmap)
{
    if(!X86_64_IS_PRESENT(pml4_base)) {
        paging_x86_64_map_table(pml4_base,
                                mem_to_local_phys((lvaddr_t)pdpt_base));
    }

    if(!X86_64_IS_PRESENT(pdpt_base)) {
        paging_x86_64_map_table(pdpt_base,
                                mem_to_local_phys((lvaddr_t)pdir_base));
    }

    if(!X86_64_IS_PRESENT(pdir_base)) {
        debug(SUBSYS_PAGING, "mapped!\n");
        paging_x86_64_map_large(pdir_base, addr, bitmap);
    } else {
//remap the page anyway, this is important for the memory latency benchmark
        debug(SUBSYS_PAGING, "already existing! remapping it\n");
        paging_x86_64_map_large(pdir_base, addr, bitmap);
    }
}
Example #9
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for kernel and memory-space. It clears out
 * all other mappings. Use this only at system bootup!
 */
void paging_arm_reset(lpaddr_t paddr, size_t bytes)
{
    // make sure kernel pagetable is aligned to 16K after relocation
    aligned_kernel_l1_table = (union arm_l1_entry *)ROUND_UP(
            (uintptr_t)kernel_l1_table, ARM_L1_ALIGN);

    // Re-map physical memory
    paging_map_memory((uintptr_t)aligned_kernel_l1_table, paddr, bytes);

    // map high-mem relocated exception vector to kernel section
    paging_map_kernel_section((uintptr_t)aligned_kernel_l1_table, ETABLE_ADDR,
                              PHYS_MEMORY_START);

    cp15_write_ttbr1(mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));
    cp15_invalidate_tlb();
}
Example #10
lvaddr_t paging_map_device(lpaddr_t device_base, size_t device_bytes)
{
    // HACK to put device in high memory.
    // Should likely track these allocations.
    static lvaddr_t dev_alloc = DEVICE_OFFSET;
    assert(device_bytes <= BYTES_PER_SECTION);
    dev_alloc -= BYTES_PER_SECTION;

    printf("paging_map_device_section: 0x%"PRIxLVADDR", 0x%"PRIxLVADDR", "
            "0x%"PRIxLPADDR".\n",
            (uintptr_t)aligned_kernel_l1_table, dev_alloc, device_base);

    paging_map_device_section((uintptr_t)aligned_kernel_l1_table, dev_alloc,
            device_base);

    cp15_write_ttbr1(mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));
    cp15_invalidate_i_and_d_caches_fast();
    cp15_invalidate_tlb();

    return dev_alloc;
}
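A usage sketch (the device address is hypothetical; per the assert above, at most one section can be mapped per call):

/* Hypothetical: map one 1 MB section of UART registers. */
lpaddr_t uart_section = 0x48000000;    /* illustrative, section-aligned */
lvaddr_t va = paging_map_device(uart_section, BYTES_PER_SECTION);
/* The returned vaddr maps the section base; add the register's offset. */
volatile uint32_t *uart_reg = (volatile uint32_t *)(va + 0x20000);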
Example #11
/**
 * \brief Initialize page tables
 *
 * This includes setting up page tables for the init process.
 */
static void init_page_tables(void)
{
    // Create page table for init
    if(hal_cpu_is_bsp()) {
        init_l1 =  (union arm_l1_entry *)local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    } else {
        init_l1 =  (union arm_l1_entry *)local_phys_to_mem(app_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)local_phys_to_mem(app_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    }

    printf("init_page_tables done: init_l1=%p init_l2=%p\n",
            init_l1, init_l2);

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * ARM has:
     *
     * L1 has 4096 entries (16KB).
     * L2 Coarse has 256 entries (256 * 4B = 1KB).
     *
     * CPU driver currently fakes having 1024 entries in L1 and
     * L2 with 1024 entries by treating a page as 4 consecutive
     * L2 tables and mapping this as a unit in L1.
     */
    caps_create_new(ObjType_VNode_ARM_l1,
                    mem_to_local_phys((lvaddr_t)init_l1),
                    vnode_objbits(ObjType_VNode_ARM_l1), 0,
                    caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
                    );


    // Map L2 into successive slots in pagecn
    size_t i;
    for (i = 0; i < INIT_L2_BYTES / BASE_PAGE_SIZE; i++) {
        size_t objbits_vnode = vnode_objbits(ObjType_VNode_ARM_l2);
        assert(objbits_vnode == BASE_PAGE_BITS);
        caps_create_new(
                        ObjType_VNode_ARM_l2,
                        mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
                        objbits_vnode, 0,
                        caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
                        );
    }

    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    STATIC_ASSERT(0 == (INIT_VBASE % ARM_L1_SECTION_BYTES), "");
    for (lvaddr_t vaddr = INIT_VBASE;
         vaddr < INIT_SPACE_LIMIT;
         vaddr += ARM_L1_SECTION_BYTES) {
        uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES;
        uintptr_t l2_off = section * ARM_L2_TABLE_BYTES;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
    }

    printf("Calling paging_context_switch with address = %"PRIxLVADDR"\n",
           mem_to_local_phys((lvaddr_t) init_l1));
    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));
}
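The "page as 4 consecutive L2 tables" trick described in the comment above works because four coarse tables exactly fill one base page; a sketch of the arithmetic:

/* Sketch: four ARM coarse L2 tables (256 entries x 4 B = 1 KB each) fill one
 * 4 KB base page, yielding a page-sized unit that covers 4 x 256 = 1024 entries. */
_Static_assert(4 * (256 * 4) == 4096, "four coarse L2 tables per base page");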
Example #12
static struct dcb *spawn_init_common(const char *name,
                                     int argc, const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys)
{
    printf("spawn_init_common %s\n", name);

    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(&spawn_state, name,
                                        argc, argv,
                                        bootinfo_phys, INIT_ARGS_VBASE,
                                        alloc_phys, &paramaddr);

    init_page_tables();

    printf("about to call mem_to_local_phys with lvaddr=%"PRIxLVADDR"\n",
           init_l1);

    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l1);

    spawn_init_map(init_l2, INIT_VBASE, INIT_ARGS_VBASE,
                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);


    // Map dispatcher
    spawn_init_map(init_l2, INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
                   INIT_PERM_RW);


    /*
     * we create the capability to the devices at this stage and store it
     * in the TASKCN_SLOT_IO, where on x86 the IO capability is stored for
     * device access on PCI. PCI is not available on the pandaboard so this
     * should not be a problem.
     */
    struct cte *iocap = caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_IO);
    errval_t err = caps_create_new(ObjType_DevFrame, 0x40000000, 30, 30, iocap);
    assert(err_is_ok(err));

    struct dispatcher_shared_generic *disp
        = get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_arm *disp_arm
        = get_dispatcher_shared_arm(init_dcb->disp);

    /* Initialize dispatcher */
    disp->disabled = true;
    strncpy(disp->name, argv[0], DISP_NAME_LEN);

    /* tell init the vspace addr of its dispatcher */
    disp->udisp = INIT_DISPATCHER_VBASE;

    disp_arm->enabled_save_area.named.r0   = paramaddr;
#ifndef __ARM_ARCH_7M__ //the armv7-m profile does not have such a mode field
    disp_arm->enabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
#endif
    disp_arm->enabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;

    printf("spawn_init_common: starting from=%"PRIxLVADDR"\n");

    return init_dcb;
}
Example #13
/**
 * \brief Cleanup the last cap copy for an object and the object itself
 */
static errval_t
cleanup_last(struct cte *cte, struct cte *ret_ram_cap)
{
    errval_t err;

    TRACE_CAP_MSG("cleaning up last copy", cte);
    struct capability *cap = &cte->cap;

    assert(!has_copies(cte));
    if (cte->mdbnode.remote_copies) {
        printk(LOG_WARN, "cleanup_last but remote_copies is set\n");
    }

    if (ret_ram_cap && ret_ram_cap->cap.type != ObjType_Null) {
        return SYS_ERR_SLOT_IN_USE;
    }

    struct RAM ram = { .bits = 0 };
    size_t len = sizeof(struct RAM) / sizeof(uintptr_t) + 1;

    if (!has_descendants(cte) && !has_ancestors(cte)) {
        // List all RAM-backed capabilities here
        // NB: ObjType_PhysAddr and ObjType_DevFrame caps are *not* RAM-backed!
        switch(cap->type) {
        case ObjType_RAM:
            ram.base = cap->u.ram.base;
            ram.bits = cap->u.ram.bits;
            break;

        case ObjType_Frame:
            ram.base = cap->u.frame.base;
            ram.bits = cap->u.frame.bits;
            break;

        case ObjType_CNode:
            ram.base = cap->u.cnode.cnode;
            ram.bits = cap->u.cnode.bits + OBJBITS_CTE;
            break;

        case ObjType_Dispatcher:
            // Convert to genpaddr
            ram.base = local_phys_to_gen_phys(mem_to_local_phys((lvaddr_t)cap->u.dispatcher.dcb));
            ram.bits = OBJBITS_DISPATCHER;
            break;

        default:
            // Handle VNodes here
            if(type_is_vnode(cap->type)) {
                ram.base = get_address(cap);
                ram.bits = vnode_objbits(cap->type);
            }
            break;
        }
    }

    err = cleanup_copy(cte);
    if (err_is_fail(err)) {
        return err;
    }

    if(ram.bits > 0) {
        // Send back as RAM cap to monitor
        if (ret_ram_cap) {
            if (dcb_current != monitor_ep.u.endpoint.listener) {
                printk(LOG_WARN, "sending fresh ram cap to non-monitor?\n");
            }
            assert(ret_ram_cap->cap.type == ObjType_Null);
            ret_ram_cap->cap.u.ram = ram;
            ret_ram_cap->cap.type = ObjType_RAM;
            err = mdb_insert(ret_ram_cap);
            TRACE_CAP_MSG("reclaimed", ret_ram_cap);
            assert(err_is_ok(err));
            // note: this is a "success" code!
            err = SYS_ERR_RAM_CAP_CREATED;
        }
        else if (monitor_ep.type && monitor_ep.u.endpoint.listener != 0) {
#ifdef TRACE_PMEM_CAPS
            struct cte ramcte;
            memset(&ramcte, 0, sizeof(ramcte));
            ramcte.cap.u.ram = ram;
            ramcte.cap.type = ObjType_RAM;
            TRACE_CAP_MSG("reclaimed", ret_ram_cap);
#endif
            // XXX: This looks pretty ugly. We need an interface.
            err = lmp_deliver_payload(&monitor_ep, NULL,
                                      (uintptr_t *)&ram,
                                      len, false);
        }
        else {
            printk(LOG_WARN, "dropping ram cap base %08"PRIxGENPADDR" bits %"PRIu8"\n", ram.base, ram.bits);
        }
        if (err_no(err) == SYS_ERR_LMP_BUF_OVERFLOW) {
            printk(LOG_WARN, "dropped ram cap base %08"PRIxGENPADDR" bits %"PRIu8"\n", ram.base, ram.bits);
            err = SYS_ERR_OK;
        } else {
            assert(err_is_ok(err));
        }
    }

    return err;
}
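The payload length above uses a conservative "+ 1", which over-allocates one word whenever sizeof(struct RAM) is already a multiple of the word size. An exact ceiling division would be, as a sketch:

/* Sketch: exact number of uintptr_t words needed to carry struct RAM. */
size_t len = (sizeof(struct RAM) + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);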

/*
 * Mark phase of revoke mark & sweep
 */

static void caps_mark_revoke_copy(struct cte *cte)
{
    errval_t err;
    err = caps_try_delete(cte);
    if (err_is_fail(err)) {
        // this should not happen as there is a copy of the cap
        panic("error while marking/deleting cap copy for revoke:"
              " 0x%"PRIuERRV"\n", err);
    }
}
Example #14
/**
 * \brief Entry point to architecture-specific initialization
 *
 * @param magic     Magic value to tell the kernel it was started by multiboot
 * @param pointer   Pointer to the multiboot structure
 * @param stack     Pointer to the stack
 *
 * ASSUMPTIONS:
 *   - the execution starts in HIGH addresses (e.g. > KERNEL_OFFSET)
 *   - Pointers to stack and multiboot structures point to HIGH memory
 *   - ARM exception level is EL1 (privileged)
 */
void
arch_init(uint32_t magic, void *pointer, uintptr_t stack) {
    global = &global_temp;
    memset(&global->locks, 0, sizeof(global->locks));

    switch (magic) {
    case MULTIBOOT2_BOOTLOADER_MAGIC:
        {
        my_core_id = 0;

        struct multiboot_header *mbhdr = pointer;
        uint32_t size = mbhdr->header_length;

        // sanity checks
        assert(mbhdr->architecture == MULTIBOOT_ARCHITECTURE_AARCH64);
        assert((mbhdr->architecture + mbhdr->checksum + mbhdr->header_length
                 + mbhdr->magic) == 0);

        struct multiboot_header_tag *mb;
        struct multiboot_tag_string *kernel_cmd;

        // get the first header tag
        mb = (struct multiboot_header_tag *)(mbhdr + 1);

        // get the kernel cmdline. this may contain address which UART/GIC to use
        kernel_cmd = multiboot2_find_cmdline(mb, size);
        if (kernel_cmd == NULL) {
            panic("Multiboot did not contain an kernel CMD line\n");
        }

        // parse the cmdline
        parse_commandline(kernel_cmd->string, cmdargs);

        // initialize the serial console.
        serial_init(serial_console_port, false);

        struct multiboot_tag_efi_mmap *mmap = (struct multiboot_tag_efi_mmap *)
                multiboot2_find_header(mb, size, MULTIBOOT_TAG_TYPE_EFI_MMAP);
        if (!mmap) {
            panic("Multiboot image does not have EFI mmap!");
        } else {
            printf("Found EFI mmap: %p\n", mmap);
        }

        mmap_find_memory(mmap);

        armv8_glbl_core_data->multiboot_image.base  = mem_to_local_phys((lvaddr_t) mb);
        armv8_glbl_core_data->multiboot_image.length = size;
        armv8_glbl_core_data->efi_mmap = mem_to_local_phys((lvaddr_t) mmap);

        armv8_glbl_core_data->cpu_driver_stack = stack;

        kernel_stack = stack;
        kernel_stack_top = stack + 16 - KERNEL_STACK_SIZE;
        break;
    }
    case ARMV8_BOOTMAGIC_PSCI :
        serial_init(serial_console_port, false);

        struct armv8_core_data *core_data = (struct armv8_core_data*)pointer;
        armv8_glbl_core_data = core_data;
        global = (struct global *)core_data->cpu_driver_globals_pointer;

        kernel_stack = stack;
        kernel_stack_top = local_phys_to_mem(core_data->cpu_driver_stack_limit);

        my_core_id = core_data->dst_core_id;

        MSG("ARMv8 Core magic...\n");

        break;
    default: {
        serial_init(serial_console_port, false);

        serial_console_putchar('x');
        serial_console_putchar('x');
        serial_console_putchar('\n');

        panic("Implement AP booting!");
        __asm volatile ("wfi":::);
        break;
    }
    }


    MSG("Barrelfish CPU driver starting on ARMv8\n");
    MSG("Global data at %p\n", global);
    MSG("Multiboot record at %p\n", pointer);
    MSG("Kernel stack at 0x%016" PRIxPTR ".. 0x%016" PRIxPTR "\n",
        kernel_stack_top, kernel_stack);
    MSG("Kernel first byte at 0x%" PRIxPTR "\n", &kernel_first_byte);

    MSG("Exception vectors (VBAR_EL1): %p\n", &vectors);
    sysreg_write_vbar_el1((uint64_t)&vectors);

    MSG("Setting coreboot spawn handler\n");
    coreboot_set_spawn_handler(CPU_ARM8, platform_boot_core);

    arm_kernel_startup(pointer);
    while (1) {
        __asm volatile ("wfi":::);
    }
}
Example #15
static struct dcb *spawn_init_common(struct spawn_state *st, const char *name,
                                     int argc, const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Perform arch-independent spawn */
    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(st, name, argc, argv, bootinfo_phys,
                                        ARGS_BASE, alloc_phys, &paramaddr);

    /* Init page tables */
    init_page_tables(st, alloc_phys);

    /* Map dispatcher R/W into VSpace starting at vaddr 0x204000
     * (starting after bootinfo pages) */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < DISPATCHER_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(DISPATCHER_BASE) + i],
                   mem_to_local_phys(init_dcb->disp) + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }

    struct dispatcher_shared_generic *init_disp =
        get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);

    registers_set_param(&init_disp_x86_32->enabled_save_area, paramaddr);

    // Map IO cap in task cnode
    struct cte *iocap = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_IO);
    err = caps_create_new(ObjType_IO, 0, 0, 0, my_core_id, iocap);
    assert(err_is_ok(err));

    /* Set fields in DCB */
    // Set Vspace
#ifdef CONFIG_PAE
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pdpte);
#else
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pdir);
#endif

    /* Initialize dispatcher */
    init_disp->disabled = true;
    strncpy(init_disp->name, argv[0], DISP_NAME_LEN);

    /* tell init the vspace addr of its dispatcher */
    init_disp->udisp = DISPATCHER_BASE;

    init_disp_x86_32->disabled_save_area.edi = DISPATCHER_BASE;
    init_disp_x86_32->disabled_save_area.fs = 0;
    init_disp_x86_32->disabled_save_area.gs = 0;
    init_disp_x86_32->disabled_save_area.cs = USER_CS;
    init_disp_x86_32->disabled_save_area.ss = USER_SS;
    init_disp_x86_32->disabled_save_area.eflags = USER_EFLAGS;
    
    return init_dcb;
}
Example #16
static void init_page_tables(struct spawn_state *st, alloc_phys_func alloc_phys)
{
    /* Allocate memory for init's page tables */
#ifdef CONFIG_PAE
    init_pdpte = (void *)local_phys_to_mem(alloc_phys(X86_32_PDPTE_SIZE
                                           * sizeof(union x86_32_pdpte_entry)));
#endif
    init_pdir = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * sizeof(union x86_32_pdir_entry)));
    init_ptable = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * INIT_PTABLE_SIZE * sizeof(union x86_32_ptable_entry)));

    /* Page table setup */
    /* Initialize init page tables */
    for(size_t j = 0; j < INIT_PDIR_SIZE; j++) {
        paging_x86_32_clear_pdir(&init_pdir[j]);
        for(size_t k = 0; k < INIT_PTABLE_SIZE; k++) {
            paging_x86_32_clear_ptable(&init_ptable[j * X86_32_PTABLE_SIZE + k]);
        }
    }
    /* Map pagetables into pageCN */
    int     pagecn_pagemap = 0;
#ifdef CONFIG_PAE
    // Map PDPTE into first slot in pagecn
    caps_create_new(ObjType_VNode_x86_32_pdpt,
                    mem_to_local_phys((lvaddr_t)init_pdpte),
                    BASE_PAGE_BITS, 0, my_core_id,
                    caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
#endif
    // Map PDIR into successive slots in pagecn
    for(size_t i = 0; i < INIT_PDIR_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_pdir,
                        mem_to_local_phys((lvaddr_t)init_pdir) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Map page tables into successive slots in pagecn
    for(size_t i = 0; i < INIT_PTABLE_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_ptable,
                        mem_to_local_phys((lvaddr_t)init_ptable) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Connect all page tables to page directories.
    // init's memory manager expects page tables within the pagecn to
    // already be connected to the corresponding directories. To avoid
    // unnecessary special cases, we connect them here.
    for(lvaddr_t vaddr = 0; vaddr < X86_32_INIT_SPACE_LIMIT;
        vaddr += BASE_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_pdpte_entry *pdpte_base =
            &init_pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE +
                       X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE *
                         X86_32_PTABLE_SIZE + X86_32_PDIR_BASE(vaddr) *
                         X86_32_PTABLE_SIZE + X86_32_PTABLE_BASE(vaddr)];

        paging_x86_32_map_pdpte(pdpte_base, mem_to_local_phys((lvaddr_t)pdir_base));
#else
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE +
                         X86_32_PTABLE_BASE(vaddr)];
#endif
        paging_x86_32_map_table(pdir_base,
                                mem_to_local_phys((lvaddr_t)ptable_base));
    }

    /* Switch to init's VSpace */
#ifdef CONFIG_PAE
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdpte));
#else
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdir));
#endif

    /***** VSpace available *****/

    /* Map cmdline args R/W into VSpace at ARGS_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < ARGS_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(ARGS_BASE) + i],
                   st->args_page + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }
}
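For the non-PAE case, the PDIR/PTABLE index macros used above split a 32-bit virtual address roughly as in this sketch (assuming 4 KB pages and 1024-entry tables; the real macro definitions may differ):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t vaddr = 0x00401000;                   /* illustrative address */
    unsigned pdir_idx   = vaddr >> 22;             /* assumed X86_32_PDIR_BASE(vaddr) */
    unsigned ptable_idx = (vaddr >> 12) & 0x3ff;   /* assumed X86_32_PTABLE_BASE(vaddr) */
    assert(pdir_idx == 1 && ptable_idx == 1);      /* 4 MB dir slot 1, page slot 1 */
    return 0;
}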
Example #17
/// Setup the module cnode, which contains frame caps to all multiboot modules
void create_module_caps(struct spawn_state *st)
{
    errval_t err;

    /* Create caps for multiboot modules */
    struct multiboot_header_tag *multiboot =
        (struct multiboot_header_tag *)local_phys_to_mem(armv8_glbl_core_data->multiboot_image.base);

    // Allocate strings area
    lpaddr_t mmstrings_phys = bsp_alloc_phys(BASE_PAGE_SIZE);
    lvaddr_t mmstrings_base = local_phys_to_mem(mmstrings_phys);
    lvaddr_t mmstrings = mmstrings_base;

    // create cap for strings area in first slot of modulecn
    assert(st->modulecn_slot == 0);
    err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_SIZE,
                          BASE_PAGE_SIZE, my_core_id,
                          caps_locate_slot(CNODE(st->modulecn),
                                           st->modulecn_slot++));
    assert(err_is_ok(err));

    // Reset bootinfo regions before walking the multiboot modules
    bootinfo->regions_length = 0;

    /* Walk over multiboot modules, creating frame caps */
    size_t position = 0;
    size_t size = armv8_glbl_core_data->multiboot_image.length;

    struct mem_region *region;

    lpaddr_t acpi_base = (lpaddr_t)-1;
    /* add the ACPI regions */
    struct multiboot_tag_new_acpi *acpi_new;
    acpi_new = (struct multiboot_tag_new_acpi *)
           multiboot2_find_header(multiboot, size, MULTIBOOT_TAG_TYPE_ACPI_NEW);
    if (acpi_new) {
        acpi_base = mem_to_local_phys((lvaddr_t)&acpi_new->rsdp[0]);
    } else {
        struct multiboot_tag_old_acpi *acpi_old;
        acpi_old = (struct multiboot_tag_old_acpi *)
           multiboot2_find_header(multiboot, size, MULTIBOOT_TAG_TYPE_ACPI_OLD);
        if (acpi_old) {
            acpi_base = mem_to_local_phys((lvaddr_t)&acpi_old->rsdp[0]);
        }
    }

    if (acpi_base != (lpaddr_t)-1) {
        region = &bootinfo->regions[bootinfo->regions_length++];
        region->mr_base = acpi_base;
        region->mr_type = RegionType_ACPI_TABLE;
    }

    /* add the module regions */
    position = 0;
    struct multiboot_tag_module_64 *module = (struct multiboot_tag_module_64 *)
            multiboot2_find_header(multiboot, size, MULTIBOOT_TAG_TYPE_MODULE_64);
    while (module) {
        // Set memory regions within bootinfo
        region = &bootinfo->regions[bootinfo->regions_length++];

        genpaddr_t remain = module->mod_end - module->mod_start;
        genpaddr_t base_addr = local_phys_to_gen_phys(module->mod_start);
        region->mr_type = RegionType_Module;
        region->mr_base = base_addr;
        region->mrmod_slot = st->modulecn_slot;  // first slot containing caps
        region->mrmod_size = remain;  // size of image _in bytes_
        region->mrmod_data = mmstrings - mmstrings_base; // offset of string in area

        // round up to page size for caps
        remain = ROUND_UP(remain, BASE_PAGE_SIZE);
        assert((base_addr & BASE_PAGE_MASK) == 0);
        assert((remain & BASE_PAGE_MASK) == 0);

        assert(st->modulecn_slot < cnode_get_slots(&st->modulecn->cap));
        // create as DevFrame cap to avoid zeroing memory contents
        err = caps_create_new(ObjType_DevFrame, base_addr, remain,
                              remain, my_core_id,
                              caps_locate_slot(CNODE(st->modulecn),
                                               st->modulecn_slot++));
        assert(err_is_ok(err));

        // Copy multiboot module string to mmstrings area
        strcpy((char *)mmstrings, module->cmdline);
        mmstrings += strlen(module->cmdline) + 1;
        assert(mmstrings < mmstrings_base + BASE_PAGE_SIZE);

        position += module->size;
        module = ((void *) module) + module->size;
        module = (struct multiboot_tag_module_64 *) multiboot2_find_header(
                (struct multiboot_header_tag *) module, size - position,
                MULTIBOOT_TAG_TYPE_MODULE_64);
    }
}
Example #18
static void init_page_tables(void)
{
    lpaddr_t (*alloc_phys_aligned)(size_t size, size_t align);
    if (cpu_is_bsp()) {
        alloc_phys_aligned = bsp_alloc_phys_aligned;
    } else {
        alloc_phys_aligned = app_alloc_phys_aligned;
    }

    // Create page table for init
    const size_t l0_size = VMSAv8_64_PTABLE_NUM_ENTRIES * INIT_L0_SIZE * sizeof(union armv8_ttable_entry);
    init_l0 = (void *) local_phys_to_mem(alloc_phys_aligned(l0_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l0, 0, l0_size);

    const size_t l1_size = l0_size * INIT_L1_SIZE;
    init_l1 = (void *) local_phys_to_mem(alloc_phys_aligned(l1_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l1, 0, l1_size);

    const size_t l2_size = l1_size * INIT_L2_SIZE;
    init_l2 = (void *) local_phys_to_mem(alloc_phys_aligned(l2_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l2, 0, l2_size);

    const size_t l3_size = l2_size * INIT_L3_SIZE;
    init_l3 = (void *) local_phys_to_mem(alloc_phys_aligned(l3_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l3, 0, l3_size);

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * AARCH64 has:
     *
     * L0 has 1 entry.
     * L1 has 1 entry.
     * L2 Coarse has 16 entries (512 * 8B = 4KB).
     * L3 Coarse has 16*512 entries (512 * 8B = 4KB).
     *
     */

    printk(LOG_NOTE, "init page tables: l0=%p, l1=%p, l2=%p, l3=%p\n",
            init_l0, init_l1, init_l2, init_l3);

    caps_create_new(
            ObjType_VNode_AARCH64_l0,
            mem_to_local_phys((lvaddr_t)init_l0),
            vnode_objsize(ObjType_VNode_AARCH64_l0), 0,
                        my_core_id,
            caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
    );

    for (size_t i = 0; i < INIT_L1_SIZE; i++) {
        size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l1);
        assert(objsize_vnode == BASE_PAGE_SIZE);
        caps_create_new(
                ObjType_VNode_AARCH64_l1,
                mem_to_local_phys((lvaddr_t)init_l1) + (i * objsize_vnode),
                objsize_vnode, 0, my_core_id,
                caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );
    }

    // Map L2 into successive slots in pagecn
    for(size_t i = 0; i < INIT_L2_SIZE; i++) {
        size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l2);
        assert(objsize_vnode == BASE_PAGE_SIZE);
        caps_create_new(
                ObjType_VNode_AARCH64_l2,
                mem_to_local_phys((lvaddr_t)init_l2) + (i * objsize_vnode),
                objsize_vnode, 0, my_core_id,
                caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );
    }

    // Map L3 into successive slots in pagecn
    for(size_t i = 0; i < INIT_L3_SIZE; i++) {
        size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l3);
        assert(objsize_vnode == BASE_PAGE_SIZE);
        caps_create_new(
                ObjType_VNode_AARCH64_l3,
                mem_to_local_phys((lvaddr_t)init_l3) + (i * objsize_vnode),
                objsize_vnode, 0,
                my_core_id,
                caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );
    }

    /*
     * Initialize init page tables - this just wires the L0
     * entries through to the corresponding L1 entries.
     */
    for(lvaddr_t vaddr = ARMV8_INIT_VBASE;
        vaddr < ARMV8_INIT_SPACE_LIMIT;
        vaddr += VMSAv8_64_L0_SIZE)
    {
        uintptr_t section = (vaddr - ARMV8_INIT_VBASE) / VMSAv8_64_L0_SIZE;
        uintptr_t l1_off = section * VMSAv8_64_PTABLE_SIZE;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l1) + l1_off;
        paging_map_table_l0(init_l0, vaddr, paddr);
    }
    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    for(lvaddr_t vaddr = ARMV8_INIT_VBASE;
        vaddr < ARMV8_INIT_SPACE_LIMIT;
        vaddr += VMSAv8_64_L1_BLOCK_SIZE)
    {
        uintptr_t section = (vaddr - ARMV8_INIT_VBASE) / VMSAv8_64_L1_BLOCK_SIZE;
        uintptr_t l2_off = section * VMSAv8_64_PTABLE_SIZE;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_table_l1(init_l1, vaddr, paddr);
    }

    /*
     * Initialize init page tables - this just wires the L2
     * entries through to the corresponding L3 entries.
     */
    STATIC_ASSERT(0 == (ARMV8_INIT_VBASE % VMSAv8_64_L2_BLOCK_SIZE), "");
    for(lvaddr_t vaddr = ARMV8_INIT_VBASE;
        vaddr < ARMV8_INIT_SPACE_LIMIT;
        vaddr += VMSAv8_64_L2_BLOCK_SIZE)
    {
        uintptr_t section = (vaddr - ARMV8_INIT_VBASE) / VMSAv8_64_L2_BLOCK_SIZE;
        uintptr_t l3_off = section * VMSAv8_64_PTABLE_SIZE;

        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l3) + l3_off;

        paging_map_table_l2(init_l2, vaddr, paddr);
    }

}
Example #19
struct dcb *spawn_bsp_init(const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Only the first core can run this code */
    assert(apic_is_bsp());
    
    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%"PRIuLPADDR, BOOTINFO_BASE);

    const char *argv[6] = { "init", bootinfochar };
    int argc = 2;

#ifdef __scc__
    if(glbl_core_data->urpc_frame_base != 0) {
        char coreidchar[10];
        snprintf(coreidchar, sizeof(coreidchar), "%d",
                 glbl_core_data->src_core_id);
        argv[argc++] = coreidchar;

        char chan_id_char[30];
        snprintf(chan_id_char, sizeof(chan_id_char), "chanid=%"PRIu32,
                 glbl_core_data->chan_id);
        argv[argc++] = chan_id_char;

        char urpc_frame_base_char[30];
        snprintf(urpc_frame_base_char, sizeof(urpc_frame_base_char),
                 "frame=%" PRIuGENPADDR, glbl_core_data->urpc_frame_base);
        argv[argc++] = urpc_frame_base_char;
    }
#endif

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name, argc, argv,
                                             bootinfo_phys, alloc_phys);

    /* Map bootinfo R/W into VSpace at vaddr 0x200000 (BOOTINFO_BASE) */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[0], mem_to_local_phys((lvaddr_t)init_pdir));
    paging_x86_32_map_table(&init_pdir[1], mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < BOOTINFO_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[i], bootinfo_phys + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }
#else
    paging_x86_32_map_table(&init_pdir[0], mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < BOOTINFO_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[i + 512], bootinfo_phys + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }
#endif

    /* Load init ELF32 binary */
    struct multiboot_modinfo *module = multiboot_find_module(name);
    if (module == NULL) {
        panic("Could not find init module!");
    }
    genvaddr_t init_ep;
    err = elf_load(EM_386, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(module->mod_start),
                   MULTIBOOT_MODULE_SIZE(*module), &init_ep);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);
    init_disp_x86_32->disabled_save_area.eip = init_ep;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    lpaddr_t init_alloc_end = alloc_phys(0); // XXX
    create_phys_caps(init_alloc_end);

    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = NEEDED_KERNEL_SPACE; // Size of kernel

    /* for (int i = 0; i < bootinfo->regions_length; i++) { */
    /*     printf("%d region %d: 0x%09" PRIxPTR " - 0x%09lx (%lu MB, %u bits)\n", */
    /*            bootinfo->regions[i].mr_type, i, bootinfo->regions[i].mr_base, */
    /*            bootinfo->regions[i].mr_base + (1UL<<bootinfo->regions[i].mr_bits), */
    /*            bootinfo->regions[i].mr_bits >= 20 */
    /*            ? 1UL << (bootinfo->regions[i].mr_bits - 20) : 0, */
    /*            bootinfo->regions[i].mr_bits); */
    /* } */

#if 0
    // If app core, map (static) URPC channel
    if(kernel_scckernel != 0) {
        printf("SCC app kernel, frame at: 0x%x\n", kernel_scckernel);
#define TASKCN_SLOT_MON_URPC    (TASKCN_SLOTS_USER+6)   ///< Frame cap for urpc comm.

        err = caps_create_new(ObjType_Frame, kernel_scckernel, 13, 13,
                              caps_locate_slot(CNODE(taskcn), TASKCN_SLOT_MON_URPC));
        assert(err_is_ok(err));
    }
#endif

    return init_dcb;
}
Example #20
struct dcb *spawn_app_init(struct x86_core_data *core_data,
                           const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

#ifdef __scc__
    char urpc_frame_base_char[30];
    snprintf(urpc_frame_base_char, sizeof(urpc_frame_base_char),
             "frame=%" PRIuGENPADDR, core_data->urpc_frame_base);
    argv[argc++] = urpc_frame_base_char;
#endif

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name, argc, argv,
                                             0, alloc_phys);

    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
                          core_data->urpc_frame_bits,
                          core_data->urpc_frame_bits, core_data->src_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < MON_URPC_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(MON_URPC_BASE) + i],
                   urpc_ptr + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }

    // elf load the domain
    genvaddr_t entry_point;
    err = elf_load(EM_386, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);
    init_disp_x86_32->disabled_save_area.eip = entry_point;

    return init_dcb;
}
Example #21
static void
create_modules_from_initrd(struct bootinfo* bi,
                           const uint8_t*   initrd_base,
                           size_t           initrd_bytes)
{
    errval_t err;
    lvaddr_t mmstrings_base = 0;
    lvaddr_t mmstrings      = 0;

    // The CPIO archive is crafted such that the first file contains the
    // command-line strings for the "modules" - i.e. menu.lst. The
    // subsequent files follow in the order they appear in
    // menu.lst.arm.
    const uint8_t* data;
    size_t bytes;

    if (cpio_get_file_by_name(initrd_base, initrd_bytes,
                              "menu.lst.modules",
                              &data, &bytes))
    {
        assert(bytes < BASE_PAGE_SIZE);

        mmstrings_base = alloc_mem(BASE_PAGE_SIZE);
        mmstrings      = mmstrings_base;

        STARTUP_PROGRESS();

        // Create cap for strings area in first slot of modulecn
        err = caps_create_new(
                  ObjType_Frame,
                  mem_to_local_phys(mmstrings_base),
                  BASE_PAGE_BITS, BASE_PAGE_BITS,
                  my_core_id,
                  caps_locate_slot(
                      CNODE(spawn_state.modulecn),
                      spawn_state.modulecn_slot++)
                  );
        assert(err_is_ok(err));

        STARTUP_PROGRESS();

        // Copy strings from file into allocated page
        memcpy((void*)mmstrings_base, data, bytes);
        ((char*)mmstrings_base)[bytes] = '\0';

        STARTUP_PROGRESS();

        // Skip first line (corresponds to bootscript in archive)
        strtok((char*)mmstrings_base, "\r\n");

        STARTUP_PROGRESS();

        assert(bi->regions_length == 0);
        int ord = 1;
        const char* name;
        while ((mmstrings = (lvaddr_t)strtok(NULL, "\r\n")) != 0)
        {
            if (!cpio_get_file_by_ordinal(initrd_base, initrd_bytes, ord,
                                          &name, &data, &bytes))
            {
                panic("Failed to find file\n");
            }
            ord++;

            debug(SUBSYS_STARTUP,
                  "Creating caps for \"%s\" (Command-line \"%s\")\n",
                   name, (char*)mmstrings);

            // Copy file from archive into RAM.
            // TODO: Give up archive space.
            size_t   pa_bytes = round_up(bytes, BASE_PAGE_SIZE);
            lpaddr_t pa       = alloc_phys(pa_bytes);
            memcpy((void*)local_phys_to_mem(pa), data, bytes);

            struct mem_region* region = &bi->regions[bi->regions_length++];
            region->mr_type    = RegionType_Module;
            region->mrmod_slot = spawn_state.modulecn_slot;
            region->mrmod_size = pa_bytes;
            region->mrmod_data = mmstrings - mmstrings_base;

            assert((pa & BASE_PAGE_MASK) == 0);
            assert((pa_bytes & BASE_PAGE_MASK) == 0);

            while (pa_bytes != 0)
            {
                assert(spawn_state.modulecn_slot
                       < (1UL << spawn_state.modulecn->cap.u.cnode.bits));
                // create as DevFrame cap to avoid zeroing memory contents
                err = caps_create_new(
                          ObjType_DevFrame, pa, BASE_PAGE_BITS,
                          BASE_PAGE_BITS,
                          my_core_id,
                          caps_locate_slot(
                              CNODE(spawn_state.modulecn),
                              spawn_state.modulecn_slot++)
                          );
                assert(err_is_ok(err));
                pa       += BASE_PAGE_SIZE;
                pa_bytes -= BASE_PAGE_SIZE;
            }
        }
    }
    else
    {
        panic("No command-line file.\n");
    }
}
Example #22
void ipi_notify_init(void)
{
    my_arch_id = apic_get_id();
    // Publish the address of the notify page in the global kernel state
    global->notify[my_arch_id] = local_phys_to_gen_phys(mem_to_local_phys((lvaddr_t)my_notify_page));
}
Example #23
static void
spawn_init(const char*      name,
           int32_t          kernel_id,
           const uint8_t*   initrd_base,
           size_t           initrd_bytes)
{
    assert(0 == kernel_id);

    // Create page table for init

    init_l1 =  (uintptr_t*)alloc_mem_aligned(INIT_L1_BYTES, ARM_L1_ALIGN);
    memset(init_l1, 0, INIT_L1_BYTES);

    init_l2 = (uintptr_t*)alloc_mem_aligned(INIT_L2_BYTES, ARM_L2_ALIGN);
    memset(init_l2, 0, INIT_L2_BYTES);

    STARTUP_PROGRESS();

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    STARTUP_PROGRESS();

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
    const char *argv[] = { "init", bootinfochar };

    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(&spawn_state, name,
                                        ARRAY_LENGTH(argv), argv,
                                        bootinfo_phys, INIT_ARGS_VBASE,
                                        alloc_phys, &paramaddr);

    STARTUP_PROGRESS();

    /*
     * Create a capability that allows user-level applications to
     * access device memory. This capability will be passed to Kaluga,
     * split up into smaller pieces and distributed to among device
     * drivers.
     *
     * For armv5, this is currently a dummy capability. We do not
     * have support for user-level device drivers in gem5 yet, so we
     * do not allocate any memory as device memory. Some cap_copy
     * operations in the bootup code fail if this capability is not
     * present.
     */
    struct cte *iocap = caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_IO);
    errval_t  err = caps_create_new(ObjType_IO, 0, 0, 0, my_core_id, iocap);
    assert(err_is_ok(err));

    struct dispatcher_shared_generic *disp
        = get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_arm *disp_arm
        = get_dispatcher_shared_arm(init_dcb->disp);
    assert(NULL != disp);

    STARTUP_PROGRESS();

    /* Initialize dispatcher */
    disp->udisp = INIT_DISPATCHER_VBASE;

    STARTUP_PROGRESS();
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l1);

    STARTUP_PROGRESS();

    /* Page table setup */

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * ARM has:
     *
     * L1 has 4096 entries (16KB).
     * L2 Coarse has 256 entries (256 * 4B = 1KB).
     *
     * CPU driver currently fakes having 1024 entries in L1 and
     * L2 with 1024 entries by treating a page as 4 consecutive
     * L2 tables and mapping this as a unit in L1.
     */
    caps_create_new(
        ObjType_VNode_ARM_l1,
        mem_to_local_phys((lvaddr_t)init_l1),
            vnode_objbits(ObjType_VNode_ARM_l1), 0,
            my_core_id,
            caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );

    STARTUP_PROGRESS();

    // Map L2 into successive slots in pagecn
    size_t i;
    for (i = 0; i < INIT_L2_BYTES / BASE_PAGE_SIZE; i++) {
        size_t objbits_vnode = vnode_objbits(ObjType_VNode_ARM_l2);
        assert(objbits_vnode == BASE_PAGE_BITS);
        caps_create_new(
            ObjType_VNode_ARM_l2,
            mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
            objbits_vnode, 0,
            my_core_id,
            caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
            );
    }

    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    STATIC_ASSERT(0 == (INIT_VBASE % ARM_L1_SECTION_BYTES), "");
    for (lvaddr_t vaddr = INIT_VBASE; vaddr < INIT_SPACE_LIMIT; vaddr += ARM_L1_SECTION_BYTES)
    {
        uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES;
        uintptr_t l2_off = section * ARM_L2_TABLE_BYTES;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
    }

    paging_make_good((lvaddr_t)init_l1, INIT_L1_BYTES);

    STARTUP_PROGRESS();

    printf("XXX: Debug print to make Bram's code work\n");

    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));

    STARTUP_PROGRESS();

    // Map cmdline arguments in VSpace at ARGS_BASE
    STATIC_ASSERT(0 == (ARGS_SIZE % BASE_PAGE_SIZE), "");

    STARTUP_PROGRESS();

    spawn_init_map(init_l2, INIT_VBASE, INIT_ARGS_VBASE,
                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);

    STARTUP_PROGRESS();

    // Map bootinfo
    spawn_init_map(init_l2, INIT_VBASE, INIT_BOOTINFO_VBASE,
                   bootinfo_phys, BOOTINFO_SIZE, INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    genvaddr_t init_ep, got_base;
    load_init_image(&l2_info, initrd_base, initrd_bytes, &init_ep, &got_base);

    // Set startup arguments (argc, argv)
    disp_arm->enabled_save_area.named.r0   = paramaddr;
    disp_arm->enabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
    disp_arm->enabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    disp_arm->enabled_save_area.named.r10  = got_base;

    disp_arm->got_base = got_base;

    struct bootinfo* bootinfo = (struct bootinfo*)INIT_BOOTINFO_VBASE;
    bootinfo->regions_length = 0;

    STARTUP_PROGRESS();

    create_modules_from_initrd(bootinfo, initrd_base, initrd_bytes);
    debug(SUBSYS_STARTUP, "used %"PRIuCSLOT" slots in modulecn\n", spawn_state.modulecn_slot);

    STARTUP_PROGRESS();
    create_phys_caps(&spawn_state.physaddrcn->cap, bootinfo);

    STARTUP_PROGRESS();

    bootinfo->mem_spawn_core  = ~0;     // Size of kernel if bringing up others

    // Map dispatcher
    spawn_init_map(init_l2, INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
                   INIT_PERM_RW);

    STARTUP_PROGRESS();

    // NB libbarrelfish initialization sets up the stack.
    disp_arm->disabled_save_area.named.pc   = init_ep;
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
    disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
    disp_arm->disabled_save_area.named.r10  = got_base;

#ifdef __XSCALE__
    cp15_disable_cache();
#endif

    printf("Kernel ready.\n");

    pit_start();

    // On to userland...
    STARTUP_PROGRESS();
    dispatch(init_dcb);

    panic("Not reached.");
}