Example No. 1
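/// Scan the EFI memory map for the largest region of conventional memory and
/// place the per-core data (armv8_glbl_core_data) and the global struct at its base.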
static void mmap_find_memory(struct multiboot_tag_efi_mmap *mmap)
{
    lpaddr_t physical_mem = 0;
    uint64_t pages = ARMV8_CORE_DATA_PAGES;

    size_t map_len = mmap->size - sizeof(struct multiboot_tag_efi_mmap);
    for (size_t i = 0; i < map_len; i += mmap->descr_size) {
        efi_memory_descriptor *desc = (efi_memory_descriptor *)(mmap->efi_mmap + i);
        if (desc->Type == EfiConventionalMemory && desc->NumberOfPages > pages) {
            physical_mem = ROUND_UP(desc->PhysicalStart, BASE_PAGE_SIZE);
            pages = desc->NumberOfPages;
        }
    }

    if (!physical_mem) {
        panic("No free memory found!\n");
    }

    armv8_glbl_core_data = (void*) local_phys_to_mem(physical_mem);
    armv8_glbl_core_data->start_kernel_ram = physical_mem;
    armv8_glbl_core_data->start_free_ram = physical_mem + sizeof(*armv8_glbl_core_data);

    global = (void*) local_phys_to_mem(armv8_glbl_core_data->start_free_ram);

    // Construct the global structure
    memset(&global->locks, 0, sizeof(global->locks));

    armv8_glbl_core_data->start_free_ram += sizeof(*global);
    armv8_glbl_core_data->start_free_ram = ROUND_UP(armv8_glbl_core_data->start_free_ram, BASE_PAGE_SIZE);

}
Example No. 2
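/// Continue ARM kernel startup: record the physical memory map, validate the
/// initrd CPIO archive, allocate the initial KCB, and spawn the init domain.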
void arm_kernel_startup(phys_mmap_t* mmap,
                        lpaddr_t     initrd_base,
                        size_t       initrd_bytes)
{
    g_phys_mmap = mmap;

    STARTUP_PROGRESS();

#ifdef __XSCALE__
    // Hardcoded because the bootloader alters the image if we pass the correct
    // location. The image size comes from a header file generated at compile time.
    initrd_base = 0x20000000;
    initrd_bytes = romfs_cpio_archive_size;
#endif

    const uint8_t* initrd_cpio_base = (uint8_t*)local_phys_to_mem(initrd_base);

    if (!cpio_archive_valid(initrd_cpio_base, initrd_bytes)) {
        panic("Invalid initrd filesystem\n");
    }

    /* allocate initial KCB */
    kcb_current = (struct kcb *) local_phys_to_mem(alloc_phys(sizeof(*kcb_current)));
    assert(kcb_current);
    memset(kcb_current, 0, sizeof(*kcb_current));

    spawn_init(BSP_INIT_MODULE_NAME, 0, initrd_cpio_base, initrd_bytes);
}
Example No. 3
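/// Dump the user-space part of a dispatcher's x86-64 page tables, printing one
/// line per present mapping (huge, large, and base pages).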
void paging_dump_tables(struct dcb *dispatcher)
{
    lvaddr_t root_pt = local_phys_to_mem(dispatcher->vspace);

    // loop over pdpts
    union x86_64_ptable_entry *pt;
    size_t kernel_pml4e = X86_64_PML4_BASE(X86_64_MEMORY_OFFSET);
    for (int pdpt_index = 0; pdpt_index < kernel_pml4e; pdpt_index++) {
        union x86_64_pdir_entry *pdpt = (union x86_64_pdir_entry *)root_pt + pdpt_index;
        if (!pdpt->raw) { continue; }
        genpaddr_t pdpt_gp = pdpt->d.base_addr << BASE_PAGE_BITS;
        lvaddr_t pdpt_lv = local_phys_to_mem(gen_phys_to_local_phys(pdpt_gp));

        for (int pdir_index = 0; pdir_index < X86_64_PTABLE_SIZE; pdir_index++) {
            // get pdir
            union x86_64_pdir_entry *pdir = (union x86_64_pdir_entry *)pdpt_lv + pdir_index;
            pt = (union x86_64_ptable_entry*)pdir;
            if (!pdir->raw) { continue; }
            // check if pdir or huge page
            if (pt->huge.always1) {
                // is huge page mapping
                genpaddr_t paddr = (genpaddr_t)pt->huge.base_addr << HUGE_PAGE_BITS;
                printf("%d.%d: 0x%"PRIxGENPADDR"\n", pdpt_index, pdir_index, paddr);
                // goto next pdpt entry
                continue;
            }
            genpaddr_t pdir_gp = pdir->d.base_addr << BASE_PAGE_BITS;
            lvaddr_t pdir_lv = local_phys_to_mem(gen_phys_to_local_phys(pdir_gp));

            for (int ptable_index = 0; ptable_index < X86_64_PTABLE_SIZE; ptable_index++) {
                // get ptable
                union x86_64_pdir_entry *ptable = (union x86_64_pdir_entry *)pdir_lv + ptable_index;
                pt = (union x86_64_ptable_entry *)ptable;
                if (!ptable->raw) { continue; }
                // check if ptable or large page
                if (pt->large.always1) {
                    // is large page mapping
                    genpaddr_t paddr = (genpaddr_t)pt->large.base_addr << LARGE_PAGE_BITS;
                    printf("%d.%d.%d: 0x%"PRIxGENPADDR"\n", pdpt_index, pdir_index, ptable_index, paddr);
                    // goto next pdir entry
                    continue;
                }
                genpaddr_t ptable_gp = ptable->d.base_addr << BASE_PAGE_BITS;
                lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));

                for (int entry = 0; entry < X86_64_PTABLE_SIZE; entry++) {
                    union x86_64_ptable_entry *e =
                        (union x86_64_ptable_entry *)ptable_lv + entry;
                    genpaddr_t paddr = (genpaddr_t)e->base.base_addr << BASE_PAGE_BITS;
                    if (!paddr) {
                        continue;
                    }
                    printf("%d.%d.%d.%d: 0x%"PRIxGENPADDR"\n", pdpt_index, pdir_index, ptable_index, entry, paddr);
                }
            }
        }
    }
}
Example No. 4
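/// Spawn the init domain on the BSP core (ARMv8 variant).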
struct dcb *spawn_bsp_init(const char *name)
{
    MSG("spawning '%s' on BSP core\n", name);
    /* Only the first core can run this code */
    assert(cpu_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = bsp_alloc_phys_aligned(BOOTINFO_SIZE, BASE_PAGE_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* store pointer to bootinfo in kernel virtual memory */
    bootinfo = (struct bootinfo *) local_phys_to_mem(bootinfo_phys);

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
    const char *argv[] = { "init", bootinfochar };
    int argc = 2;

    /* perform common spawning of init domain */
    struct dcb *init_dcb = spawn_init_common(name, argc, argv, bootinfo_phys,
                                             bsp_alloc_phys, bsp_alloc_phys_aligned);

    /* map boot info into init's VSPACE */
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, INIT_BOOTINFO_VBASE, bootinfo_phys,
                   BOOTINFO_SIZE, INIT_PERM_RW);

    /* load the image */
    genvaddr_t init_ep, got_base;
    struct startup_l3_info l3_info = { init_l3, ARMV8_INIT_VBASE };
    load_init_image(&l3_info, BSP_INIT_MODULE_NAME, &init_ep, &got_base);

    MSG("init loaded with entry=0x%" PRIxGENVADDR " and GOT=0x%" PRIxGENVADDR "\n",
         init_ep, got_base);

    struct dispatcher_shared_aarch64 *disp_aarch64 =
            get_dispatcher_shared_aarch64(init_dcb->disp);

    /* setting GOT pointers */
    disp_aarch64->got_base = got_base;
    /* XXX - Why does the kernel do this? -DC */
    disp_aarch64->enabled_save_area.named.x10  = got_base;
    disp_aarch64->disabled_save_area.named.x10  = got_base;

    /* setting entry points */
    disp_aarch64->disabled_save_area.named.pc   = init_ep;
    disp_aarch64->disabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_F_MASK;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    lpaddr_t init_alloc_end = bsp_alloc_phys(0);
    create_phys_caps(armv8_glbl_core_data->start_kernel_ram, init_alloc_end);

    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = KERNEL_IMAGE_SIZE; // Size of kernel

    return init_dcb;
}
Example No. 5
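/// Dump the user-space part of a dispatcher's x86-32 page tables.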
void paging_dump_tables(struct dcb *dispatcher)
{
    if (dispatcher->vspace > X86_32_PADDR_SPACE_LIMIT) {
        printk(LOG_ERR, "dispatcher->vspace = 0x%"PRIxLPADDR": too high!\n" ,
               dispatcher->vspace);
        return;
    }
    lvaddr_t root_pt = local_phys_to_mem(dispatcher->vspace);

#ifdef CONFIG_PAE
    // loop over pdpt entries
    for (int pdir_index = 0; pdir_index < X86_32_PDPTE_SIZE; pdir_index++) {
        // get pdir
        union x86_32_pdpte_entry *pdir = (union x86_32_pdpte_entry *)root_pt + pdir_index;
        if (!pdir->raw) { continue; }
        genpaddr_t pdir_gp = pdir->d.base_addr << BASE_PAGE_BITS;
        lvaddr_t pdir_lv = local_phys_to_mem(gen_phys_to_local_phys(pdir_gp));
#else
        int pdir_index = 0;
        lvaddr_t pdir_lv = root_pt;
#endif

        // only go to 512 because upper half of address space is kernel space
        // (1:1 mapped)
        // TODO: figure out what we need to do here for PAE
        for (int ptable_index = 0; ptable_index < 512; ptable_index++) {
            // get ptable
            union x86_32_pdir_entry *ptable = (union x86_32_pdir_entry *)pdir_lv + ptable_index;
            union x86_32_ptable_entry *large = (union x86_32_ptable_entry *)ptable;
            if (!ptable->raw) { continue; }
            if (large->large.always1) {
                // large page mapping
                genpaddr_t paddr = (genpaddr_t)large->large.base_addr << X86_32_LARGE_PAGE_BITS;
                printf("%d.%d: 0x%"PRIxGENPADDR"\n", pdir_index,
                        ptable_index, paddr);
                // go to the next pdir entry
                continue;
            }
            genpaddr_t ptable_gp = ptable->d.base_addr << BASE_PAGE_BITS;
            lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));

            for (int entry = 0; entry < X86_32_PTABLE_SIZE; entry++) {
                union x86_32_ptable_entry *e =
                    (union x86_32_ptable_entry *)ptable_lv + entry;
                genpaddr_t paddr = (genpaddr_t)e->base.base_addr << BASE_PAGE_BITS;
                if (!paddr) {
                    continue;
                }
                printf("%d.%d.%d: 0x%"PRIxGENPADDR"\n", pdir_index, ptable_index, entry, paddr);
            }
        }
#ifdef CONFIG_PAE
    } // endfor PDPT entries
#endif
}
Example No. 6
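/// Walk a dispatcher's four-level ARMv8 translation tables and print every
/// present mapping.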
void paging_dump_tables(struct dcb *dispatcher)
{
    if (!local_phys_is_valid(dispatcher->vspace)) {
        printk(LOG_ERR, "dispatcher->vspace = 0x%"PRIxLPADDR": too high!\n" ,
               dispatcher->vspace);
        return;
    }
    lvaddr_t l0 = local_phys_to_mem(dispatcher->vspace);

    for (int l0_index = 0; l0_index < VMSAv8_64_PTABLE_NUM_ENTRIES; l0_index++) {
        // get level0 table
        union armv8_ttable_entry *l0_e = (union armv8_ttable_entry *) l0 + l0_index;
        if (!l0_e->raw) {
            continue;
        }
        genpaddr_t l1_gp = (genpaddr_t)(l0_e->d.base) << BASE_PAGE_BITS;
        lvaddr_t l1 = local_phys_to_mem(gen_phys_to_local_phys(l1_gp));
        printf("l0 %d -> %p\n", l0_index, l1);

        for (int l1_index = 0; l1_index < VMSAv8_64_PTABLE_NUM_ENTRIES; l1_index++) {
            // get level1 table
            union armv8_ttable_entry *l1_e = (union armv8_ttable_entry *)l1 + l1_index;
            if (!l1_e->raw) { continue; }
            genpaddr_t l2_gp = (genpaddr_t)(l1_e->d.base) << BASE_PAGE_BITS;
            lvaddr_t l2 = local_phys_to_mem(gen_phys_to_local_phys(l2_gp));
            printf("  l1 %d -> %p\n", l1_index, l2);

            for (int l2_index = 0; l2_index < VMSAv8_64_PTABLE_NUM_ENTRIES; l2_index++) {
                // get level2 table
                union armv8_ttable_entry *l2_e = (union armv8_ttable_entry *)l2 + l2_index;
                if (!l2_e->raw) { continue; }
                genpaddr_t l3_gp = (genpaddr_t)(l2_e->d.base) << BASE_PAGE_BITS;
                lvaddr_t l3 = local_phys_to_mem(gen_phys_to_local_phys(l3_gp));
                printf("    l2 %d -> %p\n", l2_index, l3);

                for (int entry = 0; entry < VMSAv8_64_PTABLE_NUM_ENTRIES; entry++) {
                    union armv8_ttable_entry *e =
                        (union armv8_ttable_entry *)l3 + entry;
                    genpaddr_t paddr = (genpaddr_t)(e->page.base) << BASE_PAGE_BITS;
                    if (!paddr) {
                        continue;
                    }
                    printf("%d.%d.%d.%d: 0x%"PRIxGENPADDR" \n", l0_index, l1_index, l2_index, entry, paddr);
                }
            }
        }
    }
}
Example No. 7
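/// ELF loader callback: allocate physical memory for a segment of the init
/// image, map it into init's address space, and return a kernel-virtual pointer.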
static errval_t
startup_alloc_init(
    void*      state,
    genvaddr_t gvbase,
    size_t     bytes,
    uint32_t   flags,
    void**     ret
    )
{
    const struct startup_l2_info* s2i = (const struct startup_l2_info*)state;

    lvaddr_t sv = round_down((lvaddr_t)gvbase, BASE_PAGE_SIZE);
    size_t   off = (lvaddr_t)gvbase - sv;
    lvaddr_t lv = round_up((lvaddr_t)gvbase + bytes, BASE_PAGE_SIZE);
    lpaddr_t pa;

    //STARTUP_PROGRESS();
    if (hal_cpu_is_bsp()) {
        pa = bsp_alloc_phys_aligned((lv - sv), BASE_PAGE_SIZE);
    } else {
        pa = app_alloc_phys_aligned((lv - sv), BASE_PAGE_SIZE);
    }

    if (lv > sv && (pa != 0))
    {
        spawn_init_map(s2i->l2_table, s2i->l2_base, sv,
                       pa, lv - sv, elf_to_l2_flags(flags));
        *ret = (void*)(local_phys_to_mem(pa) + off);
    }
    else
    {
        *ret = 0;
    }
    return SYS_ERR_OK;
}
Example No. 8
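/// Post a notification into another core's UMP notify FIFO and send it an IPI.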
struct sysret ipi_raise_notify(coreid_t coreid, uintptr_t chanid)
{
    // Validate the core ID before using it as an index
    if (coreid >= MAX_COREID) {
        printf("UMPNOTIFY ERROR!\n");
        return SYSRET(SYS_ERR_ILLEGAL_INVOCATION);
    }

    char *notify_page = (char *)local_phys_to_mem(global->notify[coreid]);
    if (notify_page == NULL) {
        printf("UMPNOTIFY ERROR!\n");
        return SYSRET(SYS_ERR_ILLEGAL_INVOCATION);
    }

    // Locate our private notification fifo and head ptr
    volatile uint64_t *fifo = (void *)&notify_page[my_arch_id * NOTIFY_FIFO_BYTES]; 
    uint64_t slot = notifyhead[coreid] % NOTIFY_FIFO_SIZE;

    // Make sure the next slot is empty
    if (fifo[slot] != 0) {
        panic("FULL");
    }

    // Update notify fifo
    fifo[slot] = (uint64_t)chanid;
    notifyhead[coreid]++;

    // Send IPI to dest kernel
    apic_send_std_ipi(coreid, xapic_none, APIC_INTER_CORE_VECTOR);

    return SYSRET(SYS_ERR_OK);
}
Example No. 9
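/// Map a device's physical memory region into the kernel's virtual address space.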
lvaddr_t paging_x86_64_map_device(lpaddr_t base, size_t size)
{
    if(paging_map_mem(base, size, DEVICE_PAGE_BITMAP) == 0) {
        return local_phys_to_mem(base);
    } else {
        return 0;
    }
}
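
A minimal usage sketch (the device address below is hypothetical, not from the original source):

/* Map one page of a device's MMIO region and read a register through it. */
lvaddr_t mmio = paging_x86_64_map_device(0xFED00000 /* hypothetical */, BASE_PAGE_SIZE);
if (mmio != 0) {
    volatile uint32_t *reg = (volatile uint32_t *)mmio;
    uint32_t val = *reg;  /* device register access via the kernel mapping */
    (void)val;
}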
Example No. 10
static void
load_init_image(
    struct startup_l3_info* l3i,
    const char *name,
    genvaddr_t* init_ep,
    genvaddr_t* got_base
    )
{
    lvaddr_t elf_base;
    size_t elf_bytes;
    errval_t err;

    *init_ep = *got_base = 0;

    /* Load init ELF64 binary */
    struct multiboot_header_tag *multiboot =
            (struct multiboot_header_tag *) local_phys_to_mem(
                    armv8_glbl_core_data->multiboot_image.base);
    struct multiboot_tag_module_64 *module = multiboot2_find_module_64(
            multiboot, armv8_glbl_core_data->multiboot_image.length, name);
    if (module == NULL) {
        panic("Could not find init module!");
    }

    elf_base = local_phys_to_mem(module->mod_start);
    elf_bytes = MULTIBOOT_MODULE_SIZE(*module);

    debug(SUBSYS_STARTUP, "load_init_image 0x%" PRIxLVADDR " %zu\n", elf_base, elf_bytes);
    printf("load_init_image 0x%" PRIxLVADDR " %zu\n", elf_base, elf_bytes);

    err = elf_load(EM_AARCH64, startup_alloc_init, l3i,
            elf_base, elf_bytes, init_ep);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of " BSP_INIT_MODULE_NAME " failed!\n");
    }

    // TODO: Fix application linkage so that it's non-PIC.
    struct Elf64_Shdr* got_shdr =
        elf64_find_section_header_name((lvaddr_t)elf_base, elf_bytes, ".got");
    if (got_shdr)
    {
        *got_base = got_shdr->sh_addr;
    }
}
Example No. 11
/**
 * \brief Make a "good" PDE table out of a page table.
 *
 * A "good" PDE table is one that has all physical address space and
 * the kernel mapped in. This function modifies the passed PDE, based
 * at physical address 'base' accordingly. It does this by taking out
 * the corresponding entries of the kernel's pristine PDE table.
 *
 * \param base  Physical base address of PDE table to make "good".
 */
void paging_x86_32_make_good_pdir(lpaddr_t base)
{
#ifdef CONFIG_PSE
    union x86_32_ptable_entry  *newpdir =
        (union x86_32_ptable_entry *)local_phys_to_mem(base);
#else
    union x86_32_pdir_entry  *newpdir =
        (union x86_32_pdir_entry *)local_phys_to_mem(base);
#endif
    int                 i;

    debug(SUBSYS_PAGING, "Is now a PDE: table = 0x%" PRIxLPADDR "\n", base);

    // Map memory
    for(i = X86_32_PDIR_BASE(X86_32_MEMORY_OFFSET); i < X86_32_PDIR_SIZE; i++) {
        newpdir[i] = pdir[i];
    }
}
Example No. 12
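/// Spawn the init domain on the BSP core (32-bit ARM variant).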
struct dcb *spawn_bsp_init(const char *name, alloc_phys_func alloc_phys)
{
    printf("spawn_bsp_init\n");

    /* Only the first core can run this code */
    assert(hal_cpu_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
    const char *argv[] = { "init", bootinfochar };
    int argc = 2;

    struct dcb *init_dcb = spawn_init_common(name, argc, argv, bootinfo_phys,
            alloc_phys);

    // Map bootinfo
    spawn_init_map(init_l2, INIT_VBASE, INIT_BOOTINFO_VBASE,
                   bootinfo_phys, BOOTINFO_SIZE, INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    genvaddr_t init_ep, got_base;
    load_init_image(&l2_info, BSP_INIT_MODULE_NAME, &init_ep, &got_base);

    struct dispatcher_shared_arm *disp_arm
        = get_dispatcher_shared_arm(init_dcb->disp);
    disp_arm->enabled_save_area.named.r10  = got_base;
    disp_arm->got_base = got_base;

    disp_arm->disabled_save_area.named.pc   = init_ep;
#ifndef __ARM_ARCH_7M__ //the armv7-m profile does not have such a mode field
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
#endif
    disp_arm->disabled_save_area.named.r10  = got_base;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    lpaddr_t init_alloc_end = alloc_phys(0); // XXX
    create_phys_caps(init_alloc_end);

    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = KERNEL_IMAGE_SIZE; // Size of kernel

    /*
    // Map dispatcher
    spawn_init_map(init_l2, INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
                   INIT_PERM_RW);
    disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
	*/
    return init_dcb;
}
Example No. 13
/*
 * Boot secondary processors
 */
errval_t platform_boot_core(hwid_t target, genpaddr_t gen_entry, genpaddr_t context)
{
    printf("Invoking PSCI on: cpu=0x%lx, entry=0x%lx, context=0x%lx\n", target, gen_entry, context);
    struct armv8_core_data *cd = (struct armv8_core_data *)local_phys_to_mem(context);
    cd->page_table_root = armv8_TTBR1_EL1_rd(NULL);
    cd->cpu_driver_globals_pointer = (uintptr_t)global;
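    /* Make the writes to core_data visible before the new core starts. */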
    __asm volatile("dsb   sy\n"
                   "dmb   sy\n"
                   "isb     \n");
    return psci_cpu_on(target, gen_entry, context);
}
Example No. 14
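/// Unmap num_pages page-table entries starting at the given slot, flushing the
/// TLB when a virtual address for the mapping can be reconstructed.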
errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping,
                             size_t slot, size_t num_pages)
{
    assert(type_is_vnode(pgtable->type));
    errval_t err;
    debug(SUBSYS_PAGING, "page_mappings_unmap(%zd pages)\n", num_pages);

    // get page table entry data
    genpaddr_t paddr;

    read_pt_entry(pgtable, slot, &paddr, NULL, NULL);
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    // get virtual address of first page
    // TODO: error checking
    genvaddr_t vaddr;
    bool tlb_flush_necessary = true;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    err = compile_vaddr(leaf_pt, slot, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        } else if (err_no(err) == SYS_ERR_VNODE_SLOT_INVALID
                   && leaf_pt->mapping_info.pte == 0) {
            debug(SUBSYS_PAGING, "unmapping in floating page table; not flushing TLB\n");
            tlb_flush_necessary = false;
        } else {
            return err;
        }
    }

    if (num_pages != mapping->mapping_info.pte_count) {
        // want to unmap a different amount of pages than was mapped
        return SYS_ERR_VM_MAP_SIZE;
    }

    do_unmap(pt, slot, num_pages);

    // flush TLB for unmapped pages if we got a valid virtual address
    // TODO: heuristic that decides if selective or full flush is more
    //       efficient?
    if (tlb_flush_necessary) {
        if (num_pages > 1 || err_is_fail(err)) {
            do_full_tlb_flush();
        } else {
            do_one_tlb_flush(vaddr);
        }
    }

    // update mapping info
    memset(&mapping->mapping_info, 0, sizeof(struct mapping_info));

    return SYS_ERR_OK;
}
Example No. 15
/**
 * \brief Make a "good" PDPTE table out of a page table.
 *
 * A "good" PDPTE table is one that has all physical address space and
 * the kernel mapped in. This function modifies the passed PDPTE, based
 * at physical address 'base' accordingly. It does this by taking out
 * the corresponding entries of the kernel's pristine PDPTE table.
 *
 * \param base  Physical base address of PDPTE table to make "good".
 */
void paging_x86_32_make_good_pdpte(lpaddr_t base)
{
    union x86_32_pdpte_entry   *newpdpte =
        (union x86_32_pdpte_entry *)local_phys_to_mem(base);
    int                 i;

    debug(SUBSYS_PAGING, "Is now a PDPTE: table = 0x%x\n", base);
    // Map memory
    for(i = X86_32_PDPTE_BASE(X86_32_MEMORY_OFFSET); i < X86_32_PDPTE_SIZE; i++) {
        newpdpte[i] = pdpte[i];
    }
}
Example No. 16
/*
 * Copy the kernel mappings (everything from MEMORY_OFFSET upwards) from the
 * currently active L1 table into the freshly allocated L1 table.
 */
void paging_make_good(lvaddr_t new_table_base, size_t new_table_bytes)
{
    assert(new_table_base >= MEMORY_OFFSET);
    assert(new_table_bytes == ARM_L1_ALIGN);
    assert(aligned(new_table_base, ARM_L1_ALIGN));

    lvaddr_t ttbr = local_phys_to_mem(cp15_read_ttbr0());
    size_t st = (MEMORY_OFFSET / ARM_L1_SECTION_BYTES) * ARM_L1_BYTES_PER_ENTRY;

    // Copy kernel pages (everything from MEMORY_OFFSET upwards)
    memcpy((void*)new_table_base + st, (void*)ttbr + st,
           ARM_L1_MAX_ENTRIES * ARM_L1_BYTES_PER_ENTRY - st);
}
Example No. 17
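/// Walk the current dispatcher's page tables and return the kernel-virtual
/// address of the leaf page table covering vaddr, or 0 if it is not mapped.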
static inline lvaddr_t get_leaf_ptable_for_vaddr(genvaddr_t vaddr)
{
    lvaddr_t root_pt = local_phys_to_mem(dcb_current->vspace);

    // get pdpt
    union x86_64_pdir_entry *pdpt = (union x86_64_pdir_entry *)root_pt + X86_64_PML4_BASE(vaddr);
    if (!pdpt->raw) { return 0; }
    genpaddr_t pdpt_gp = pdpt->d.base_addr << BASE_PAGE_BITS;
    lvaddr_t pdpt_lv = local_phys_to_mem(gen_phys_to_local_phys(pdpt_gp));
    // get pdir
    union x86_64_pdir_entry *pdir = (union x86_64_pdir_entry *)pdpt_lv + X86_64_PDPT_BASE(vaddr);
    if (!pdir->raw) { return 0; }
    genpaddr_t pdir_gp = pdir->d.base_addr << BASE_PAGE_BITS;
    lvaddr_t pdir_lv = local_phys_to_mem(gen_phys_to_local_phys(pdir_gp));
    // get ptable
    union x86_64_ptable_entry *ptable = (union x86_64_ptable_entry *)pdir_lv + X86_64_PDIR_BASE(vaddr);
    if (!ptable->raw) { return 0; }
    genpaddr_t ptable_gp = ptable->base.base_addr << BASE_PAGE_BITS;
    lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));

    return ptable_lv;
}
Example No. 18
/**
 * \brief Make a "good" PML4 table out of a page table.
 *
 * A "good" PML4 table is one that has all physical address space and
 * the kernel mapped in. This function modifies the passed PML4, based
 * at physical address 'base' accordingly. It does this by taking out
 * the corresponding entries of the kernel's pristine PML4 table.
 *
 * \param base  Physical base address of PML4 table to make "good".
 */
void paging_x86_64_make_good_pml4(lpaddr_t base)
{
    union x86_64_pdir_entry *newpml4 =
        (union x86_64_pdir_entry *)local_phys_to_mem(base);
    int                 i;

    // XXX: Disabled till vaddr_t is figured out
    debug(SUBSYS_PAGING, "Is now a PML4: table = 0x%"PRIxLPADDR"\n", base);

    // Map memory
    for(i = X86_64_PML4_BASE(MEMORY_OFFSET); i < X86_64_PTABLE_SIZE; i++) {
        newpml4[i] = pml4[i];
    }
}
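
A hedged sketch of how this helper might be used when creating a new address space; alloc_phys() is an assumed allocator, not part of the original source:

/* Allocate and clear a fresh PML4, then copy in the kernel's mappings. */
lpaddr_t new_pml4 = alloc_phys(BASE_PAGE_SIZE);   /* assumed allocator */
memset((void *)local_phys_to_mem(new_pml4), 0, BASE_PAGE_SIZE);
paging_x86_64_make_good_pml4(new_pml4);           /* kernel entries now present */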
Example No. 19
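/// Read the page-table entry at 'slot': return the mapped physical address, the
/// entry's own physical address, and a kernel-virtual pointer to the entry.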
static inline void read_pt_entry(struct capability *pgtable, size_t slot,
                                 genpaddr_t *mapped_addr, lpaddr_t *pte,
                                 void **entry)
{
    assert(type_is_vnode(pgtable->type));

    genpaddr_t paddr;
    lpaddr_t pte_;
    void *entry_;

    genpaddr_t gp = get_address(pgtable);
    lpaddr_t lp = gen_phys_to_local_phys(gp);
    lvaddr_t lv = local_phys_to_mem(lp);

    // get paddr
    switch (pgtable->type) {
    case ObjType_VNode_x86_64_pml4:
    case ObjType_VNode_x86_64_pdpt:
    case ObjType_VNode_x86_64_pdir: {
        union x86_64_pdir_entry *e =
            (union x86_64_pdir_entry *)lv + slot;
        paddr = (genpaddr_t)e->d.base_addr << BASE_PAGE_BITS;
        entry_ = e;
        pte_ = lp + slot * sizeof(union x86_64_pdir_entry);
        break;
    }
    case ObjType_VNode_x86_64_ptable: {
        union x86_64_ptable_entry *e =
            (union x86_64_ptable_entry *)lv + slot;
        paddr = (genpaddr_t)e->base.base_addr << BASE_PAGE_BITS;
        entry_ = e;
        pte_ = lp + slot * sizeof(union x86_64_ptable_entry);
        break;
    }
    default:
        assert(!"Should not get here");
    }

    if (mapped_addr) {
        *mapped_addr = paddr;
    }
    if (pte) {
        *pte = pte_;
    }
    if (entry) {
        *entry = entry_;
    }
}
Example No. 20
/**
 * \brief Map a region of physical memory into physical memory address space.
 *
 * Maps the region of physical memory, based at base and sized size bytes
 * to the same-sized virtual memory region. All pages are flagged according to
 * bitmap. This function automatically fills the needed page directory entries
 * in the page hierarchy rooted at pml4. base and size will be made
 * page-aligned by this function.
 *
 * \param base          Physical base address of memory region
 * \param size          Size in bytes of memory region
 * \param bitmap        Bitmap of flags for page tables/directories
 *
 * \return 0 on success, -1 on error (out of range)
 */
static int
paging_map_mem(lpaddr_t base,
               size_t size,
               uint64_t bitmap)
{
    lvaddr_t vaddr, vbase = local_phys_to_mem(base);
    lpaddr_t addr;

    // Align given physical base address
    if (base & X86_64_MEM_PAGE_MASK) {
        base -= base & X86_64_MEM_PAGE_MASK;
    }

    paging_align(&vbase, &base, &size, X86_64_MEM_PAGE_SIZE);

    // Is mapped region out of range?
    assert(base + size <= (lpaddr_t)K1OM_PADDR_SPACE_LIMIT);
    if (base + size > (lpaddr_t) K1OM_PADDR_SPACE_LIMIT) {
        return -1;
    }

    // Map pages, tables and directories
    for (vaddr = vbase, addr = base; vaddr < vbase + size;
         vaddr += X86_64_MEM_PAGE_SIZE, addr += X86_64_MEM_PAGE_SIZE) {
        union x86_64_pdir_entry *pml4_base = &pml4[X86_64_PML4_BASE(vaddr)];

        union x86_64_pdir_entry *pdpt_base =
                &mem_pdpt[X86_64_PML4_BASE(addr)][X86_64_PDPT_BASE(vaddr)];

        union x86_64_ptable_entry *pdir_base =
                &mem_pdir[X86_64_PML4_BASE(addr)][X86_64_PDPT_BASE(addr)][X86_64_PDIR_BASE(
                        vaddr)];

        debug(SUBSYS_PAGING,
              "Mapping 2M page: vaddr = 0x%"PRIxLVADDR", addr = 0x%lx, "
              "PML4_BASE = %lu, PDPT_BASE = %lu, PDIR_BASE = %lu -- ",
              vaddr, addr, X86_64_PML4_BASE(vaddr), X86_64_PDPT_BASE(vaddr),
              X86_64_PDIR_BASE(vaddr));

        mapit(pml4_base, pdpt_base, pdir_base, addr, bitmap);
    }
    // XXX FIXME: get rid of this TLB flush code, or move it elsewhere
    // uint64_t cr3;
    // __asm__ __volatile__("mov %%cr3,%0" : "=a" (cr3) : );
    // __asm__ __volatile__("mov %0,%%cr3" :  : "a" (cr3));

    return 0;
}
Example No. 21
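/// Unmap all pages described by a mapping capability from its page table,
/// flushing the TLB when a virtual address can be reconstructed.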
errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping)
{
    assert(type_is_vnode(pgtable->type));
    assert(type_is_mapping(mapping->cap.type));
    struct Frame_Mapping *info = &mapping->cap.u.frame_mapping;
    errval_t err;
    debug(SUBSYS_PAGING, "page_mappings_unmap(%hu pages)\n", info->pte_count);

    // calculate page table address
    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));

    cslot_t slot = info->entry;
    // get virtual address of first page
    genvaddr_t vaddr;
    bool tlb_flush_necessary = true;
    struct cte *leaf_pt = cte_for_cap(pgtable);
    err = compile_vaddr(leaf_pt, slot, &vaddr);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED && vaddr == 0) {
            debug(SUBSYS_PAGING, "unmapping in floating page table; not flushing TLB\n");
            tlb_flush_necessary = false;
        } else if (err_no(err) == SYS_ERR_VNODE_SLOT_INVALID) {
            debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
        } else {
            return err;
        }
    }

    do_unmap(pt, slot, info->pte_count);

    // flush TLB for unmapped pages if we got a valid virtual address
    // TODO: heuristic that decides if selective or full flush is more
    //       efficient?
    if (tlb_flush_necessary) {
        if (info->pte_count > 1 || err_is_fail(err)) {
            do_full_tlb_flush();
        } else {
            do_one_tlb_flush(vaddr);
        }
    }

    return SYS_ERR_OK;
}
Example No. 22
/**
 * \brief Boot an arm app core
 *
 * \param core_id   ID of the core to try booting
 * \param entry     Entry address for new kernel in the destination
 *                  architecture's lvaddr_t
 *
 * \returns Zero on successful boot, non-zero (error code) on failure
 */
int start_aps_arm_start(uint8_t core_id, lvaddr_t entry)
{
    //printf("----> %s (%s:%d): core_id=%u entry=0x%lx\n",
    //       __FUNCTION__, __FILE__, __LINE__,
    //       core_id, entry);

    /* pointer to the pseudo-lock used to detect boot up of new core */
    volatile uint32_t *ap_wait = (uint32_t*)local_phys_to_mem(AP_WAIT_PHYS);
    *ap_wait = AP_STARTING_UP;
    cp15_invalidate_d_cache();

    // map AUX_CORE_BOOT section
    static lvaddr_t aux_core_boot = 0;
    if (aux_core_boot == 0)
        aux_core_boot = paging_map_device(AUX_CORE_BOOT_SECT, ARM_L1_SECTION_BYTES);

    volatile lvaddr_t *aux_core_boot_0, *aux_core_boot_1;
    // The AUX_CORE_BOOT_0 register is used to store the startup state
    aux_core_boot_0 = (void *)(aux_core_boot + AUX_CORE_BOOT_0_OFFSET);
    aux_core_boot_1 = (void *)(aux_core_boot + AUX_CORE_BOOT_1_OFFSET);

    // Write the entry address of the new kernel (where the other core should
    // jump) to the AUX_CORE_BOOT_1 register
    debug(SUBSYS_STARTUP, "setting AUX_CORE_BOOT_1 to 0x%"PRIxLVADDR"\n", entry);
    *aux_core_boot_1 = entry;

    // Tell ROM code to start other core
    debug(SUBSYS_STARTUP, "AUX_CORE_BOOT_0 |= 1<< 2\n");
    *aux_core_boot_0 |= 1 << 2;

    // send signal to app core to start
    debug(SUBSYS_STARTUP, "sending event to other core(s?)\n");
    __asm__ volatile ("SEV");

    debug(SUBSYS_STARTUP, "waiting for response\n");
    while (*aux_core_boot_0 != 2<<2)
        ;

    debug(SUBSYS_STARTUP, "booted CPU%hhu\n", core_id);

    return 0;
}
Example No. 23
/// Map within a x86_32 pdpt
static errval_t x86_32_pdpt(struct capability *dest, cslot_t slot,
                            struct capability * src, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    if (slot >= X86_32_PTABLE_SIZE) { // Slot within page table
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count > 1) { // disallow multiple pdpt mappings at a time
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_x86_32_pdir) { // Right mapping
        return SYS_ERR_WRONG_MAPPING;
    }

    if(slot >= X86_32_PDPTE_BASE(X86_32_MEMORY_OFFSET)) { // Kernel mapped here
        return SYS_ERR_VNODE_SLOT_RESERVED;
    }

    // Destination
    genpaddr_t dest_gp   = dest->u.vnode_x86_32_pdpt.base;
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    union x86_32_pdpte_entry *entry =
        (union x86_32_pdpte_entry *)dest_lv + slot;

    // Set metadata
    create_mapping_cap(mapping_cte, src,
                       dest_lp + slot * sizeof(union x86_32_pdpte_entry),
                       pte_count);

    // Source
    genpaddr_t src_gp   = src->u.vnode_x86_32_pdir.base;
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
    paging_x86_32_map_pdpte(entry, src_lp);
    paging_x86_32_context_switch(dcb_current->vspace); // To flush TLB

    return SYS_ERR_OK;
}
Example No. 24
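/// Apply new paging flags to a range of entries of a mapped frame (ARMv8) and
/// flush the affected TLB range.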
errval_t paging_modify_flags(struct capability *mapping, uintptr_t offset,
                             uintptr_t pages, uintptr_t kpi_paging_flags)
{
    assert(type_is_mapping(mapping->type));
    struct Frame_Mapping *info = &mapping->u.frame_mapping;

    // check flags
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    /* Calculate location of page table entries we need to modify */
    lvaddr_t base = local_phys_to_mem(get_address(&info->ptable->cap)) +
        (info->entry + offset) * sizeof(union armv8_ttable_entry);

    for (int i = 0; i < pages; i++) {
        union armv8_ttable_entry *entry =
            (union armv8_ttable_entry *)base + i;
        paging_set_flags(entry, kpi_paging_flags);
    }

    return paging_tlb_flush_range(cte_for_cap(mapping), 0, pages);
}
Example No. 25
/// Create physical address range or RAM caps to unused physical memory
static void create_phys_caps(lpaddr_t reserved_start, lpaddr_t reserved_end)
{
    /* Walk multiboot MMAP structure, and create appropriate caps for memory */
    struct multiboot_tag_efi_mmap *mmap = (struct multiboot_tag_efi_mmap *)
            local_phys_to_mem(armv8_glbl_core_data->efi_mmap);

    lpaddr_t last_end_addr = 0;
    for (size_t i = 0; i < (mmap->size - sizeof(struct multiboot_tag_efi_mmap)) / mmap->descr_size; i++) {
        efi_memory_descriptor *desc = (efi_memory_descriptor *)(mmap->efi_mmap + mmap->descr_size * i);

        enum region_type region_type = RegionType_Max;
        switch (desc->Type) {
            case EfiConventionalMemory:
            case EfiPersistentMemory:
                region_type = RegionType_Empty;
                break;
            case EfiACPIReclaimMemory:
            default:
                region_type = RegionType_PlatformData;
                break;
        }

        if (last_end_addr < desc->PhysicalStart) {
            // create cap for gap in mmap
            create_phys_caps_region(reserved_start, reserved_end, last_end_addr, desc->PhysicalStart - last_end_addr, RegionType_PhyAddr);
        }
        last_end_addr = desc->PhysicalStart + desc->NumberOfPages * BASE_PAGE_SIZE;

        create_phys_caps_region(reserved_start, reserved_end, desc->PhysicalStart, desc->NumberOfPages * BASE_PAGE_SIZE, region_type);
    }

    size_t size = (1UL << 48) - last_end_addr;

    create_phys_caps_region(reserved_start, reserved_end, last_end_addr, size, RegionType_PhyAddr);
}
Example No. 26
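/// Return the physical address stored in the entry at 'slot' of the given
/// ARMv8 page-table capability.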
static inline void read_pt_entry(struct capability *pgtable, size_t slot, genpaddr_t *paddr)
{
    assert(type_is_vnode(pgtable->type));
    assert(paddr);

    genpaddr_t gp = get_address(pgtable);
    lpaddr_t lp = gen_phys_to_local_phys(gp);
    lvaddr_t lv = local_phys_to_mem(lp);

    switch (pgtable->type) {
        case ObjType_VNode_AARCH64_l0:
        case ObjType_VNode_AARCH64_l1:
        case ObjType_VNode_AARCH64_l2:
        {
            // intermediate tables: index by the requested slot
            union armv8_ttable_entry *e = (union armv8_ttable_entry *) lv + slot;
            *paddr = (genpaddr_t) (e->d.base) << 12;
            return;
        }
        case ObjType_VNode_AARCH64_l3:
        {
            union armv8_ttable_entry *e = (union armv8_ttable_entry *) lv + slot;
            *paddr = (genpaddr_t) (e->page.base) << 12;
            return;
        }
        default:
            assert(!"Should not get here");
    }
}
Example No. 27
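/// Spawn the monitor domain on an application core, wiring up the URPC frame
/// shared with the core that booted it.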
struct dcb *spawn_app_init(struct arm_core_data *core_data,
                           const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

    struct dcb *init_dcb = spawn_init_common(name, argc, argv, 0, alloc_phys);

    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
                          core_data->urpc_frame_bits,
                          core_data->urpc_frame_bits, urpc_frame_cte);
    assert(err_is_ok(err));
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
    spawn_init_map(init_l2, INIT_VBASE, MON_URPC_VBASE, urpc_ptr, MON_URPC_SIZE,
                   INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    // elf load the domain
    genvaddr_t entry_point, got_base=0;
    err = elf_load(EM_ARM, startup_alloc_init, &l2_info,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // TODO: Fix application linkage so that it's non-PIC.
    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(local_phys_to_mem(core_data->monitor_binary),
                                       core_data->monitor_binary_size, ".got");
    if (got_shdr)
    {
    	got_base = got_shdr->sh_addr;
    }

    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);
    disp_arm->enabled_save_area.named.r10  = got_base;
    disp_arm->got_base = got_base;

    disp_arm->disabled_save_area.named.pc   = entry_point;
#ifndef __ARM_ARCH_7M__ //the armv7-m profile does not have such a mode field
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
#endif
    disp_arm->disabled_save_area.named.r10  = got_base;
    //disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;

    return init_dcb;
}
Example No. 28
/**
 * \brief Initialize page tables
 *
 * This includes setting up page tables for the init process.
 */
static void init_page_tables(void)
{
    // Create page table for init
    if(hal_cpu_is_bsp()) {
        init_l1 =  (union arm_l1_entry *)local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    } else {
        init_l1 =  (union arm_l1_entry *)local_phys_to_mem(app_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)local_phys_to_mem(app_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    }

    printf("init_page_tables done: init_l1=%p init_l2=%p\n",
            init_l1, init_l2);

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * ARM has:
     *
     * L1 has 4096 entries (16KB).
     * L2 Coarse has 256 entries (256 * 4B = 1KB).
     *
     * CPU driver currently fakes having 1024 entries in L1 and
     * L2 with 1024 entries by treating a page as 4 consecutive
     * L2 tables and mapping this as a unit in L1.
     */
    caps_create_new(ObjType_VNode_ARM_l1,
                    mem_to_local_phys((lvaddr_t)init_l1),
                    vnode_objbits(ObjType_VNode_ARM_l1), 0,
                    caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
                    );

    //STARTUP_PROGRESS();

    // Map L2 into successive slots in pagecn
    size_t i;
    for (i = 0; i < INIT_L2_BYTES / BASE_PAGE_SIZE; i++) {
        size_t objbits_vnode = vnode_objbits(ObjType_VNode_ARM_l2);
        assert(objbits_vnode == BASE_PAGE_BITS);
        caps_create_new(
                        ObjType_VNode_ARM_l2,
                        mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
                        objbits_vnode, 0,
                        caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
                        );
    }

    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    STATIC_ASSERT(0 == (INIT_VBASE % ARM_L1_SECTION_BYTES), "");
    for (lvaddr_t vaddr = INIT_VBASE;
         vaddr < INIT_SPACE_LIMIT;
         vaddr += ARM_L1_SECTION_BYTES) {
        uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES;
        uintptr_t l2_off = section * ARM_L2_TABLE_BYTES;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
    }

    printf("Calling paging_context_switch with address = %"PRIxLVADDR"\n",
           mem_to_local_phys((lvaddr_t) init_l1));
    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));
}
Example No. 29
/// Setup the module cnode, which contains frame caps to all multiboot modules
void create_module_caps(struct spawn_state *st)
{
    errval_t err;

    /* Create caps for multiboot modules */
    struct multiboot_modinfo *module =
        (struct multiboot_modinfo *)local_phys_to_mem(glbl_core_data->mods_addr);

    // Allocate strings area
    lpaddr_t mmstrings_phys = bsp_alloc_phys(BASE_PAGE_SIZE);
    lvaddr_t mmstrings_base = local_phys_to_mem(mmstrings_phys);
    lvaddr_t mmstrings = mmstrings_base;

    // create cap for strings area in first slot of modulecn
    assert(st->modulecn_slot == 0);
    err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_BITS,
                          BASE_PAGE_BITS,
                          caps_locate_slot(CNODE(st->modulecn),
                                           st->modulecn_slot++));
    assert(err_is_ok(err));

    /* Walk over multiboot modules, creating frame caps */
    for (int i = 0; i < glbl_core_data->mods_count; i++) {
        struct multiboot_modinfo *m = &module[i];

        // Set memory regions within bootinfo
        struct mem_region *region =
            &bootinfo->regions[bootinfo->regions_length++];

        genpaddr_t remain = MULTIBOOT_MODULE_SIZE(*m);
        genpaddr_t base_addr = local_phys_to_gen_phys(m->mod_start);

        region->mr_type = RegionType_Module;
        region->mr_base = base_addr;
        region->mrmod_slot = st->modulecn_slot;  // first slot containing caps
        region->mrmod_size = remain;  // size of image _in bytes_
        region->mrmod_data = mmstrings - mmstrings_base; // offset of string in area

        // round up to page size for caps
        remain = ROUND_UP(remain, BASE_PAGE_SIZE);

        // Create max-sized caps to multiboot module in module cnode
        while (remain > 0) {
            assert((base_addr & BASE_PAGE_MASK) == 0);
            assert((remain & BASE_PAGE_MASK) == 0);

            // determine size of next chunk
            uint8_t block_size = bitaddralign(remain, base_addr);

            assert(st->modulecn_slot < (1UL << st->modulecn->cap.u.cnode.bits));
            // create as DevFrame cap to avoid zeroing memory contents
            err = caps_create_new(ObjType_DevFrame, base_addr, block_size,
                                  block_size,
                                  caps_locate_slot(CNODE(st->modulecn),
                                                   st->modulecn_slot++));
            assert(err_is_ok(err));

            // Advance by that chunk
            base_addr += ((genpaddr_t)1 << block_size);
            remain -= ((genpaddr_t)1 << block_size);
        }

        // Copy multiboot module string to mmstrings area
        strcpy((char *)mmstrings, MBADDR_ASSTRING(m->string));
        mmstrings += strlen(MBADDR_ASSTRING(m->string)) + 1;
        assert(mmstrings < mmstrings_base + BASE_PAGE_SIZE);
    }
}
Example No. 30
/**
 * \brief Entry point to architecture-specific initialization
 *
 * \param magic     Magic value to tell the kernel it was started by multiboot
 * \param pointer   Pointer to the multiboot structure
 * \param stack     Pointer to the stack
 *
 * ASSUMPTIONS:
 *   - the execution starts in HIGH addresses (e.g. > KERNEL_OFFSET)
 *   - Pointers to stack and multiboot structures point to HIGH memory
 *   - ARM exception level is EL1 (privileged)
 */
void
arch_init(uint32_t magic, void *pointer, uintptr_t stack) {
    global = &global_temp;
    memset(&global->locks, 0, sizeof(global->locks));

    switch (magic) {
    case MULTIBOOT2_BOOTLOADER_MAGIC:
        {
        my_core_id = 0;

        struct multiboot_header *mbhdr = pointer;
        uint32_t size = mbhdr->header_length;

        // sanity checks
        assert(mbhdr->architecture == MULTIBOOT_ARCHITECTURE_AARCH64);
        assert((mbhdr->architecture + mbhdr->checksum + mbhdr->header_length
                 + mbhdr->magic) == 0);

        struct multiboot_header_tag *mb;
        struct multiboot_tag_string *kernel_cmd;

        // get the first header tag
        mb = (struct multiboot_header_tag *)(mbhdr + 1);

        // get the kernel cmdline; this may specify which UART/GIC to use
        kernel_cmd = multiboot2_find_cmdline(mb, size);
        if (kernel_cmd == NULL) {
            panic("Multiboot did not contain an kernel CMD line\n");
        }

        // parse the cmdline
        parse_commandline(kernel_cmd->string, cmdargs);

        // initialize the serial console.
        serial_init(serial_console_port, false);
//        serial_console_init(false);

        struct multiboot_tag_efi_mmap *mmap = (struct multiboot_tag_efi_mmap *)
                multiboot2_find_header(mb, size, MULTIBOOT_TAG_TYPE_EFI_MMAP);
        if (!mmap) {
            panic("Multiboot image does not have EFI mmap!");
        } else {
            printf("Found EFI mmap: %p\n", mmap);
        }

        mmap_find_memory(mmap);

        armv8_glbl_core_data->multiboot_image.base  = mem_to_local_phys((lvaddr_t) mb);
        armv8_glbl_core_data->multiboot_image.length = size;
        armv8_glbl_core_data->efi_mmap = mem_to_local_phys((lvaddr_t) mmap);

        armv8_glbl_core_data->cpu_driver_stack = stack;

        kernel_stack = stack;
        kernel_stack_top = stack + 16 - KERNEL_STACK_SIZE;
        break;
    }
    case ARMV8_BOOTMAGIC_PSCI:
        serial_init(serial_console_port, false);

        struct armv8_core_data *core_data = (struct armv8_core_data*)pointer;
        armv8_glbl_core_data = core_data;
        global = (struct global *)core_data->cpu_driver_globals_pointer;

        kernel_stack = stack;
        kernel_stack_top = local_phys_to_mem(core_data->cpu_driver_stack_limit);

        my_core_id = core_data->dst_core_id;

        MSG("ARMv8 Core magic...\n");

        break;
    default: {
        serial_init(serial_console_port, false);

        serial_console_putchar('x');
        serial_console_putchar('x');
        serial_console_putchar('\n');

        panic("Implement AP booting!");
        __asm volatile ("wfi":::);
        break;
    }
    }


    MSG("Barrelfish CPU driver starting on ARMv8\n");
    MSG("Global data at %p\n", global);
    MSG("Multiboot record at %p\n", pointer);
    MSG("Kernel stack at 0x%016" PRIxPTR ".. 0x%016" PRIxPTR "\n",
        kernel_stack_top, kernel_stack);
    MSG("Kernel first byte at 0x%" PRIxPTR "\n", &kernel_first_byte);

    MSG("Exception vectors (VBAR_EL1): %p\n", &vectors);
    sysreg_write_vbar_el1((uint64_t)&vectors);

    MSG("Setting coreboot spawn handler\n");
    coreboot_set_spawn_handler(CPU_ARM8, platform_boot_core);

    arm_kernel_startup(pointer);
    while (1) {
        __asm volatile ("wfi":::);
    }
}