Example #1
0
static inline void mapit(union x86_32_pdir_entry *pdir_base,
                         union x86_32_ptable_entry *ptable_base,
                         lpaddr_t addr, uint64_t bitmap)
{
    if(!X86_32_IS_PRESENT(pdir_base)) {
        paging_x86_32_map_table(pdir_base,
                                mem_to_local_phys((lvaddr_t)ptable_base));
    }

    if(!X86_32_IS_PRESENT(ptable_base)) {
        debug(SUBSYS_PAGING, "mapped!\n");
    } else {
        //remap the page anyway, this is important for the memory latency benchmark
        debug(SUBSYS_PAGING, "already existing! remapping it\n");
    }

    paging_x86_32_map(ptable_base, addr, bitmap);
}
/**
 * \brief Map within a x86_32 page directory.
 *
 * Two cases are handled:
 *  - src is a Frame/DevFrame: install pte_count consecutive large-page
 *    mappings starting at directory slot `slot`, backed by the frame at
 *    `offset`.
 *  - src is a ptable vnode: install the page table in directory slot
 *    `slot` (the `offset` argument is not used for the actual mapping).
 *
 * \param dest        Page directory capability to map into.
 * \param slot        First directory slot to fill.
 * \param src         Frame/DevFrame (large pages) or x86_32 ptable vnode.
 * \param flags       Caller-requested access/attribute flags.
 * \param offset      Byte offset into the source frame.
 * \param pte_count   Number of consecutive entries to map.
 * \param mapping_cte CTE that receives the mapping capability metadata.
 *
 * \return SYS_ERR_OK on success, or a SYS_ERR_* code describing the
 *         invalid argument / conflicting mapping.
 */
static errval_t x86_32_pdir(struct capability *dest, cslot_t slot,
                            struct capability * src, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    if (slot >= X86_32_PTABLE_SIZE) { // Slot within page table
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (slot + pte_count > X86_32_PTABLE_SIZE) {
        // check that mapping fits page directory
        return SYS_ERR_VM_MAP_SIZE;
    }

#ifndef CONFIG_PAE
    if(slot >= X86_32_PDIR_BASE(X86_32_MEMORY_OFFSET)) { // Kernel mapped here
        return SYS_ERR_VNODE_SLOT_RESERVED;
    }
#endif

    // large page code
    if(src->type == ObjType_Frame || src->type == ObjType_DevFrame)
    {
        cslot_t last_slot = slot + pte_count;

        // check offset within frame
        if (offset + pte_count * X86_32_LARGE_PAGE_SIZE > get_size(src)) {
            return SYS_ERR_FRAME_OFFSET_INVALID;
        }

        /* Calculate page access protection flags */
        // Get frame cap rights
        paging_x86_32_flags_t flags_large =
            paging_x86_32_cap_to_page_flags(src->rights);
        // Mask with provided access rights mask
        flags_large = paging_x86_32_mask_attrs(flags_large, X86_32_PTABLE_ACCESS(flags));
        // Add additional arch-specific flags
        flags_large |= X86_32_PTABLE_FLAGS(flags);
        // Unconditionally mark the page present
        flags_large |= X86_32_PTABLE_PRESENT;

        // Convert destination base address
        genpaddr_t dest_gp   = get_address(dest);
        lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
        lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
        // Convert source base address
        genpaddr_t src_gp   = get_address(src);
        lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
        // Set metadata
        create_mapping_cap(mapping_cte, src,
                           dest_lp + slot * sizeof(union x86_32_ptable_entry),
                           offset,
                           pte_count);

        for (; slot < last_slot; slot++, offset += X86_32_LARGE_PAGE_SIZE) {
            union x86_32_ptable_entry *entry =
                (union x86_32_ptable_entry *)dest_lv + slot;

            /* FIXME: Flush TLB if the page is already present
             * in the meantime, since we don't do this, we just assert that
             * we never reuse a VA mapping */
            if (X86_32_IS_PRESENT(entry)) {
                printf("Trying to map into an already present page NYI.\n");
                return SYS_ERR_VNODE_SLOT_INUSE;
            }

            // Carry out the page mapping
            paging_x86_32_map_large(entry, src_lp + offset, flags_large);
        }

        return SYS_ERR_OK;
    }

    if (src->type != ObjType_VNode_x86_32_ptable) { // Right mapping
        return SYS_ERR_WRONG_MAPPING;
    }

    // Destination
    genpaddr_t dest_gp   = dest->u.vnode_x86_32_pdir.base;
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    union x86_32_pdir_entry *entry =
        (union x86_32_pdir_entry *)dest_lv + slot;

    // Set metadata
    // FIX: the original call dropped the `offset` argument, leaving this
    // call with one argument fewer than the large-page path above passes
    // to the same function. Pass `offset` so both call sites match
    // create_mapping_cap's signature.
    create_mapping_cap(mapping_cte, src,
                       dest_lp + slot * sizeof(union x86_32_pdir_entry),
                       offset,
                       pte_count);

    // Source
    // XXX: offset is ignored for the actual table mapping
    genpaddr_t src_gp   = src->u.vnode_x86_32_pdir.base;
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
    paging_x86_32_map_table(entry, src_lp);

    return SYS_ERR_OK;
}
Example #3
0
/**
 * \brief Spawn the init/monitor domain on an application (non-BSP) core.
 *
 * Builds init's command line from the booting core's IDs, spawns the
 * domain, maps the URPC frame shared with the booting core into its
 * VSpace at MON_URPC_BASE, and ELF-loads the monitor binary to obtain
 * the entry point.
 *
 * \param core_data   Boot parameters handed over by the core that booted us.
 * \param name        Program name (becomes argv[0]).
 * \param alloc_phys  Physical-memory allocator used during spawn.
 *
 * \return DCB of the newly created init domain.
 */
struct dcb *spawn_app_init(struct x86_core_data *core_data,
                           const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    // Four fixed arguments; the fifth slot is reserved for the optional
    // SCC frame argument appended below.
    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

#ifdef __scc__
    char urpc_frame_base_char[30];
    snprintf(urpc_frame_base_char, sizeof(urpc_frame_base_char),
             "frame=%" PRIuGENPADDR, core_data->urpc_frame_base);
    argv[argc++] = urpc_frame_base_char;
#endif

    // bootinfo_phys is 0: app cores get no bootinfo page of their own.
    struct dcb *init_dcb = spawn_init_common(&spawn_state, name, argc, argv,
                                             0, alloc_phys);

    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
                          core_data->urpc_frame_bits,
                          core_data->urpc_frame_bits, core_data->src_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    // Flip the type back to Frame after creation; presumably so init sees
    // an ordinary frame cap despite the DevFrame creation trick above —
    // TODO confirm against the capability retype rules.
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < MON_URPC_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(MON_URPC_BASE) + i],
                   urpc_ptr + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }

    // elf load the domain
    genvaddr_t entry_point;
    err = elf_load(EM_386, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // Point the new dispatcher at the ELF entry point.
    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);
    init_disp_x86_32->disabled_save_area.eip = entry_point;

    return init_dcb;
}
Example #4
0
/**
 * \brief Spawn the init domain on the bootstrap (BSP) core.
 *
 * Allocates and zeroes the bootinfo area, spawns init with the bootinfo
 * address on its command line, maps bootinfo R/W into init's VSpace,
 * ELF-loads the init binary from the multiboot modules, and creates the
 * module and physical-memory capabilities init needs.
 *
 * \param name        Name of the init module to load (also argv[0]).
 * \param alloc_phys  Physical-memory allocator used during spawn.
 *
 * \return DCB of the newly created init domain.
 */
struct dcb *spawn_bsp_init(const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Only the first core can run this code */
    assert(apic_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%"PRIuLPADDR, BOOTINFO_BASE);

    // Two fixed arguments; up to three more appended in the SCC case below.
    const char *argv[6] = { "init", bootinfochar };
    int argc = 2;

#ifdef __scc__
    if(glbl_core_data->urpc_frame_base != 0) {
        char coreidchar[10];
        snprintf(coreidchar, sizeof(coreidchar), "%d",
                 glbl_core_data->src_core_id);
        argv[argc++] = coreidchar;

        char chan_id_char[30];
        snprintf(chan_id_char, sizeof(chan_id_char), "chanid=%"PRIu32,
                 glbl_core_data->chan_id);
        argv[argc++] = chan_id_char;

        char urpc_frame_base_char[30];
        snprintf(urpc_frame_base_char, sizeof(urpc_frame_base_char),
                 "frame=%" PRIuGENPADDR, glbl_core_data->urpc_frame_base);
        argv[argc++] = urpc_frame_base_char;
    }
#endif

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name, argc, argv,
                                             bootinfo_phys, alloc_phys);

    /* Map bootinfo R/W into VSpace at vaddr 0x200000 (BOOTINFO_BASE) */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[0], mem_to_local_phys((lvaddr_t)init_pdir));
    paging_x86_32_map_table(&init_pdir[1], mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < BOOTINFO_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[i], bootinfo_phys + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }
#else
    // Non-PAE: 512 is presumably X86_32_PTABLE_BASE(BOOTINFO_BASE)
    // (0x200000 / BASE_PAGE_SIZE) — TODO confirm against BOOTINFO_BASE.
    paging_x86_32_map_table(&init_pdir[0], mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < BOOTINFO_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[i + 512], bootinfo_phys + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }
#endif

    /* Load init ELF32 binary */
    struct multiboot_modinfo *module = multiboot_find_module(name);
    if (module == NULL) {
        panic("Could not find init module!");
    }
    genvaddr_t init_ep;
    err = elf_load(EM_386, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(module->mod_start),
                   MULTIBOOT_MODULE_SIZE(*module), &init_ep);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // Point the new dispatcher at the ELF entry point.
    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);
    init_disp_x86_32->disabled_save_area.eip = init_ep;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    // alloc_phys(0) returns the current allocation watermark; everything
    // above it becomes init's free physical memory.
    lpaddr_t init_alloc_end = alloc_phys(0); // XXX
    create_phys_caps(init_alloc_end);

    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = NEEDED_KERNEL_SPACE; // Size of kernel

    /* for (int i = 0; i < bootinfo->regions_length; i++) { */
    /*     printf("%d region %d: 0x%09" PRIxPTR " - 0x%09lx (%lu MB, %u bits)\n", */
    /*            bootinfo->regions[i].mr_type, i, bootinfo->regions[i].mr_base, */
    /*            bootinfo->regions[i].mr_base + (1UL<<bootinfo->regions[i].mr_bits), */
    /*            bootinfo->regions[i].mr_bits >= 20 */
    /*            ? 1UL << (bootinfo->regions[i].mr_bits - 20) : 0, */
    /*            bootinfo->regions[i].mr_bits); */
    /* } */

#if 0
    // If app core, map (static) URPC channel
    if(kernel_scckernel != 0) {
        printf("SCC app kernel, frame at: 0x%x\n", kernel_scckernel);
#define TASKCN_SLOT_MON_URPC    (TASKCN_SLOTS_USER+6)   ///< Frame cap for urpc comm.

        err = caps_create_new(ObjType_Frame, kernel_scckernel, 13, 13,
                              caps_locate_slot(CNODE(taskcn), TASKCN_SLOT_MON_URPC));
        assert(err_is_ok(err));
    }
#endif

    return init_dcb;
}
Example #5
0
/**
 * \brief Arch-specific common path for spawning the init domain.
 *
 * Runs the arch-independent spawn, builds init's page tables, maps its
 * dispatcher frame at DISPATCHER_BASE, creates the IO capability, and
 * initializes the DCB and dispatcher save areas.
 *
 * \param st             Spawn state tracking cnodes/slots being populated.
 * \param name           Program name (unused beyond spawn_module here).
 * \param argc, argv     Command line handed to init.
 * \param bootinfo_phys  Physical address of bootinfo (0 on app cores).
 * \param alloc_phys     Physical-memory allocator used during spawn.
 *
 * \return DCB of the newly created init domain.
 */
static struct dcb *spawn_init_common(struct spawn_state *st, const char *name,
                                     int argc, const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Perform arch-independent spawn */
    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(st, name, argc, argv, bootinfo_phys,
                                        ARGS_BASE, alloc_phys, &paramaddr);

    /* Init page tables */
    init_page_tables(st, alloc_phys);

    /* Map dispatcher R/W into VSpace starting at vaddr 0x204000
     * (Starting after Bootinfo pages)*/
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < DISPATCHER_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(DISPATCHER_BASE) + i],
                   mem_to_local_phys(init_dcb->disp) + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }

    struct dispatcher_shared_generic *init_disp =
        get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);

    // Hand init the address of its spawn parameters.
    registers_set_param(&init_disp_x86_32->enabled_save_area, paramaddr);

    // Map IO cap in task cnode
    struct cte *iocap = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_IO);
    err = caps_create_new(ObjType_IO, 0, 0, 0, my_core_id, iocap);
    assert(err_is_ok(err));

    /* Set fields in DCB */
    // Set Vspace: root table is the PDPTE under PAE, the PDIR otherwise.
#ifdef CONFIG_PAE
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pdpte);
#else
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pdir);
#endif

    /* Initialize dispatcher */
    init_disp->disabled = true;
    strncpy(init_disp->name, argv[0], DISP_NAME_LEN);

    /* tell init the vspace addr of its dispatcher */
    init_disp->udisp = DISPATCHER_BASE;

    // Seed the disabled save area: user-mode segments/flags, with EDI
    // carrying the dispatcher address to the entry stub.
    init_disp_x86_32->disabled_save_area.edi = DISPATCHER_BASE;
    init_disp_x86_32->disabled_save_area.fs = 0;
    init_disp_x86_32->disabled_save_area.gs = 0;
    init_disp_x86_32->disabled_save_area.cs = USER_CS;
    init_disp_x86_32->disabled_save_area.ss = USER_SS;
    init_disp_x86_32->disabled_save_area.eflags = USER_EFLAGS;

    return init_dcb;
}
Example #6
0
/**
 * \brief Allocate and wire up init's page tables, then switch to them.
 *
 * Allocates the (optionally PAE) page-table hierarchy for init, exposes
 * every table as a vnode capability in init's pagecn, connects page tables
 * to their directories for the whole init address-space window, switches
 * to init's VSpace, and finally maps the command-line args at ARGS_BASE.
 *
 * NOTE: statement order is load-bearing — tables must be cleared and
 * connected before the context switch halfway through this function.
 *
 * \param st          Spawn state whose pagecn receives the vnode caps.
 * \param alloc_phys  Physical-memory allocator used for the tables.
 */
static void init_page_tables(struct spawn_state *st, alloc_phys_func alloc_phys)
{
    /* Allocate memory for init's page tables */
#ifdef CONFIG_PAE
    init_pdpte = (void *)local_phys_to_mem(alloc_phys(X86_32_PDPTE_SIZE
                                           * sizeof(union x86_32_pdpte_entry)));
#endif
    init_pdir = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * sizeof(union x86_32_pdir_entry)));
    init_ptable = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * INIT_PTABLE_SIZE * sizeof(union x86_32_ptable_entry)));

    /* Page table setup */
    /* Initialize init page tables */
    for(size_t j = 0; j < INIT_PDIR_SIZE; j++) {
        paging_x86_32_clear_pdir(&init_pdir[j]);
        for(size_t k = 0; k < INIT_PTABLE_SIZE; k++) {
            paging_x86_32_clear_ptable(&init_ptable[j * X86_32_PTABLE_SIZE + k]);
        }
    }
    /* Map pagetables into pageCN */
    // pagecn_pagemap tracks the next free pagecn slot across all loops.
    int     pagecn_pagemap = 0;
#ifdef CONFIG_PAE
    // Map PDPTE into first slot in pagecn
    caps_create_new(ObjType_VNode_x86_32_pdpt,
                    mem_to_local_phys((lvaddr_t)init_pdpte),
                    BASE_PAGE_BITS, 0, my_core_id,
                    caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
#endif
    // Map PDIR into successive slots in pagecn
    for(size_t i = 0; i < INIT_PDIR_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_pdir,
                        mem_to_local_phys((lvaddr_t)init_pdir) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Map page tables into successive slots in pagecn
    for(size_t i = 0; i < INIT_PTABLE_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_ptable,
                        mem_to_local_phys((lvaddr_t)init_ptable) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Connect all page tables to page directories.
    // init's memory manager expects page tables within the pagecn to
    // already be connected to the corresponding directories. To avoid
    // unneccessary special cases, we connect them here.
    for(lvaddr_t vaddr = 0; vaddr < X86_32_INIT_SPACE_LIMIT;
        vaddr += BASE_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_pdpte_entry *pdpte_base =
            &init_pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE +
                       X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE *
                         X86_32_PTABLE_SIZE + X86_32_PDIR_BASE(vaddr) *
                         X86_32_PTABLE_SIZE + X86_32_PTABLE_BASE(vaddr)];

        paging_x86_32_map_pdpte(pdpte_base, mem_to_local_phys((lvaddr_t)pdir_base));
#else
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE +
                         X86_32_PTABLE_BASE(vaddr)];
#endif
        paging_x86_32_map_table(pdir_base,
                                mem_to_local_phys((lvaddr_t)ptable_base));
    }

    /* Switch to init's VSpace */
#ifdef CONFIG_PAE
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdpte));
#else
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdir));
#endif

    /***** VSpace available *****/

    /* Map cmdline args R/W into VSpace at ARGS_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < ARGS_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(ARGS_BASE) + i],
                   st->args_page + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }
}