Code Example #1
File: paging.c  Project: CoryXie/BarrelfishOS
lvaddr_t paging_x86_32_map_special(lpaddr_t base, size_t size, uint64_t bitmap)
{
    // Allocate backwards from a page below end of address space
    static lvaddr_t vbase = (lvaddr_t)X86_32_VADDR_SPACE_SIZE;
    lpaddr_t addr;
    lvaddr_t vaddr;

    paging_align(&vbase, &base, &size, X86_32_MEM_PAGE_SIZE);

    // Align physical base address
    lpaddr_t offset = base & (X86_32_MEM_PAGE_SIZE - 1);
    base -= offset;

    if(vbase - size < X86_32_VADDR_SPACE_SIZE - X86_32_DEVICE_SPACE_LIMIT) {
        return 0;
    }

    // Map pages, tables and directories (reverse order)
    for(vaddr = vbase - X86_32_MEM_PAGE_SIZE,
            addr = base + size - X86_32_MEM_PAGE_SIZE;
        vaddr >= vbase - size;
        vaddr -= X86_32_MEM_PAGE_SIZE, addr -= X86_32_MEM_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_pdpte_entry *pdpte_base = &pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_ptable_entry *pdir_base =
            &mem_pdir[X86_32_PDPTE_BASE(mem_to_local_phys(vaddr))][X86_32_PDIR_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 2M device page: vaddr = 0x%x, addr = 0x%x, "
              "PDPTE_BASE = %u, PDIR_BASE = %u -- ", vaddr,
              addr, X86_32_PDPTE_BASE(vaddr), X86_32_PDIR_BASE(vaddr));
        mapit(pdpte_base, pdir_base, addr, bitmap);
#else
#       ifdef CONFIG_PSE
        union x86_32_ptable_entry *pdir_base = &pdir[X86_32_PDIR_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4M device page: vaddr = 0x%x, addr = 0x%x, "
              "PDIR_BASE = %u -- ", vaddr, addr, X86_32_PDIR_BASE(vaddr));
        mapit(pdir_base, addr, bitmap);
#       else
        union x86_32_pdir_entry *pdir_base = &pdir[X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &mem_ptable[X86_32_PDIR_BASE(vaddr) - (X86_32_PTABLE_SIZE - MEM_PTABLE_SIZE)][X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K device page: vaddr = 0x%"PRIxLVADDR", "
              "addr = 0x%"PRIxLPADDR", "
              "PDIR_BASE = %"PRIxLPADDR", PTABLE_BASE = %"PRIxLPADDR", pdir = %p, ptable = %p -- ",
              vaddr, addr, X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr), pdir,
              mem_ptable[X86_32_PDIR_BASE(vaddr) - (X86_32_PTABLE_SIZE - MEM_PTABLE_SIZE)]);
        mapit(pdir_base, ptable_base, addr, bitmap);
#       endif
#endif
    }

    vbase -= size;
    return vbase + offset;
}
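This function, like the others below, begins by page-aligning the region it was handed. A minimal, self-contained sketch of that alignment arithmetic, assuming 4M pages and hypothetical names (the real paging_align() helper may behave differently):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 0x400000u  /* 4M page granularity, hypothetical */

/* Round base down and size up to the page boundary, sketching what
 * paging_align() is assumed to do here. */
static void align_region(uintptr_t *base, size_t *size)
{
    uintptr_t offset = *base & (PAGE_SIZE - 1);  /* intra-page offset */
    *base -= offset;                             /* round base down */
    *size = (*size + offset + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1);
}

int main(void)
{
    uintptr_t base = 0x400123;
    size_t size = 0x100;
    align_region(&base, &size);
    assert(base == 0x400000 && size == PAGE_SIZE);
    return 0;
}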
Code Example #2
File: startup_arch.c  Project: XuNazgul/cmpe295A
/**
 * \brief Map init user-space memory.
 *
 * This function maps the pages of the init user-space module. It expects
 * the virtual base address 'vbase' of a program segment of the init
 * executable, its size 'size', and its ELF64 access control flags. It maps
 * the pages to a contiguous region of physical memory starting at 'base'.
 * To allocate physical memory frames as you go instead, use
 * startup_alloc_init().
 *
 * \param vbase Virtual base address of program segment.
 * \param base  Physical base address of program segment.
 * \param size  Size of program segment in bytes.
 * \param flags ELF64 access control flags of program segment.
 */
errval_t startup_map_init(lvaddr_t vbase, lpaddr_t base, size_t size,
                          uint32_t flags)
{
    lvaddr_t vaddr;

    paging_align(&vbase, &base, &size, BASE_PAGE_SIZE);
    assert(vbase + size < X86_32_INIT_SPACE_LIMIT);

    // Map pages
    for(vaddr = vbase; vaddr < vbase + size;
        vaddr += BASE_PAGE_SIZE, base += BASE_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_ptable_entry *ptable_base = &init_ptable[
                    + X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE * X86_32_PTABLE_SIZE
                    + X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE
                    + X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%x, base = 0x%x, "
              "PDPTE_BASE = %u, PDIR_BASE = %u, "
              "PTABLE_BASE = %u -- ", vaddr, base, X86_32_PDPTE_BASE(vaddr),
              X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr));
#else
        union x86_32_ptable_entry *ptable_base = &init_ptable[
                    X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE
                    + X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%"PRIxLVADDR
                             ", base = 0x%"PRIxLPADDR", "
              "PDIR_BASE = %"PRIuLPADDR", "
              "PTABLE_BASE = %"PRIuLPADDR" -- ", vaddr, base,
              X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr));
#endif

        if(!X86_32_IS_PRESENT(ptable_base)) {
            debug(SUBSYS_PAGING, "mapped!\n");
            paging_x86_32_map(ptable_base, base,
                       INIT_PAGE_BITMAP | paging_elf_to_page_flags(flags));
        } else {
            debug(SUBSYS_PAGING, "already existing!\n");
        }
    }

    return SYS_ERR_OK;
}
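The init_ptable subscript in the non-PAE branch flattens the two-level (directory, table) split into a single array index. A self-contained sketch of that arithmetic, with the macros redefined locally from the standard x86-32 address split (the Barrelfish definitions are assumed to match):

#include <assert.h>
#include <stdint.h>

#define PTABLE_SIZE      1024u                               /* entries per table */
#define PDIR_BASE(va)    (((uint32_t)(va) >> 22) & 0x3FFu)   /* bits 31:22 */
#define PTABLE_BASE(va)  (((uint32_t)(va) >> 12) & 0x3FFu)   /* bits 21:12 */

int main(void)
{
    uint32_t vaddr = 0x00404123;
    /* Index into a flat, contiguous block of page tables like init_ptable */
    uint32_t idx = PDIR_BASE(vaddr) * PTABLE_SIZE + PTABLE_BASE(vaddr);
    /* Because the tables are contiguous, this is just the page number */
    assert(idx == (vaddr >> 12));
    return 0;
}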
Code Example #3
File: paging.c  Project: CoryXie/BarrelfishOS
/**
 * \brief Make a "good" PDPTE table out of a page table.
 *
 * A "good" PDPTE table is one that has all physical address space and
 * the kernel mapped in. This function modifies the PDPTE table based
 * at physical address 'base' accordingly, copying in the corresponding
 * entries from the kernel's pristine PDPTE table.
 *
 * \param base  Physical base address of PDPTE table to make "good".
 */
void paging_x86_32_make_good_pdpte(lpaddr_t base)
{
    union x86_32_pdpte_entry   *newpdpte =
        (union x86_32_pdpte_entry *)local_phys_to_mem(base);
    int                 i;

    debug(SUBSYS_PAGING, "Is now a PDPTE: table = 0x%x\n", base);
    // Map memory
    for(i = X86_32_PDPTE_BASE(X86_32_MEMORY_OFFSET); i < X86_32_PDPTE_SIZE; i++) {
        newpdpte[i] = pdpte[i];
    }
}
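The loop above is the usual kernel-window trick: every new top-level table inherits the pristine kernel entries from X86_32_MEMORY_OFFSET upward, so the kernel stays mapped in every address space. A minimal sketch under assumed PAE constants (a 4-entry PDPT and a hypothetical 0x80000000 kernel offset):

#include <stdint.h>

#define PDPTE_SIZE      4u                       /* PAE PDPT entries */
#define MEMORY_OFFSET   0x80000000u              /* assumed kernel-space start */
#define PDPTE_BASE(va)  ((uint32_t)(va) >> 30)

typedef uint64_t pdpte_t;                        /* PAE entries are 64-bit */

/* Copy every kernel-space slot from the pristine table into the new one */
static void make_good(pdpte_t *newtab, const pdpte_t *pristine)
{
    for (unsigned i = PDPTE_BASE(MEMORY_OFFSET); i < PDPTE_SIZE; i++) {
        newtab[i] = pristine[i];
    }
}

int main(void)
{
    pdpte_t pristine[PDPTE_SIZE] = { 0, 0, 0x1003, 0x2003 };  /* slots 2,3 = kernel */
    pdpte_t fresh[PDPTE_SIZE] = { 0 };
    make_good(fresh, pristine);
    return (fresh[2] == pristine[2] && fresh[3] == pristine[3]) ? 0 : 1;
}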
Code Example #4
/// Map within an x86_32 pdpt
static errval_t x86_32_pdpt(struct capability *dest, cslot_t slot,
                            struct capability * src, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    if (slot >= X86_32_PTABLE_SIZE) { // Slot within page table
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count > 1) { // disallow multiple pdpt mappings at a time
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_x86_32_pdir) { // Right mapping
        return SYS_ERR_WRONG_MAPPING;
    }

    if(slot >= X86_32_PDPTE_BASE(X86_32_MEMORY_OFFSET)) { // Kernel mapped here
        return SYS_ERR_VNODE_SLOT_RESERVED;
    }

    // Destination
    genpaddr_t dest_gp   = dest->u.vnode_x86_32_pdpt.base;
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    union x86_32_pdpte_entry *entry =
        (union x86_32_pdpte_entry *)dest_lv + slot;

    // Set metadata
    create_mapping_cap(mapping_cte, src,
                       dest_lp + slot * sizeof(union x86_32_pdpte_entry),
                       pte_count);

    // Source
    genpaddr_t src_gp   = src->u.vnode_x86_32_pdir.base;
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
    paging_x86_32_map_pdpte(entry, src_lp);
    paging_x86_32_context_switch(dcb_current->vspace); // To flush TLB

    return SYS_ERR_OK;
}
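The handler rejects a mapping before touching any state: slot range, mapping size, source capability type, then the reserved kernel slots, since every top-level slot at or above PDPTE_BASE(X86_32_MEMORY_OFFSET) belongs to the kernel window. A self-contained sketch of that check sequence (the capability-type check is omitted), with illustrative constants and error values standing in for the Barrelfish ones:

#include <stdint.h>

#define PTABLE_SIZE     512u            /* assumed slots per PAE vnode */
#define MEMORY_OFFSET   0x80000000u     /* hypothetical kernel-space start */
#define PDPTE_BASE(va)  ((uint32_t)(va) >> 30)

enum map_err { MAP_OK, MAP_BAD_SLOT, MAP_BAD_SIZE, MAP_RESERVED };

static enum map_err check_pdpt_mapping(unsigned slot, unsigned pte_count)
{
    if (slot >= PTABLE_SIZE)                return MAP_BAD_SLOT;  /* outside table */
    if (pte_count > 1)                      return MAP_BAD_SIZE;  /* one entry at a time */
    if (slot >= PDPTE_BASE(MEMORY_OFFSET))  return MAP_RESERVED;  /* kernel window */
    return MAP_OK;
}

int main(void)
{
    return check_pdpt_mapping(1, 1) == MAP_OK ? 0 : 1;
}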
Code Example #5
File: paging.c  Project: CoryXie/BarrelfishOS
/**
 * \brief Map a region of physical memory into physical memory address space.
 *
 * Maps the region of physical memory, based at base and sized size bytes
 * to the same-sized virtual memory region. All pages are flagged according to
 * bitmap. This function automatically fills the needed page directory entries
 * in the page hierarchy rooted at pml4. base and size will be made
 * page-aligned by this function.
 *
 * \param base          Base address of memory region
 * \param size          Size in bytes of memory region
 * \param bitmap        Bitmap of flags for page tables/directories
 *
 * \return 0 on success, -1 on error (out of range)
 */
static int paging_x86_32_map_mem(lpaddr_t base, size_t size, uint64_t bitmap)
{
    lvaddr_t    vaddr, vbase = local_phys_to_mem(base);
    lpaddr_t    addr;

    paging_align(&vbase, &base, &size, X86_32_MEM_PAGE_SIZE);

    // Is mapped region out of range?
    assert(local_phys_to_gen_phys(base + size) <= X86_32_PADDR_SPACE_LIMIT);
    if(local_phys_to_gen_phys(base + size) > X86_32_PADDR_SPACE_LIMIT) {
        printk(LOG_ERR, "Mapped region [%"PRIxLPADDR",%"PRIxLPADDR"] "
                        "out of physical address range!",
               base, base + size);
        return -1;
    }

    assert(local_phys_to_gen_phys(vbase + size) <= X86_32_VADDR_SPACE_SIZE);

    // Map pages, tables and directories
    for(vaddr = vbase, addr = base;;
        vaddr += X86_32_MEM_PAGE_SIZE, addr += X86_32_MEM_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_pdpte_entry *pdpte_base = &pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_ptable_entry *pdir_base =
            &mem_pdir[X86_32_PDPTE_BASE(addr)][X86_32_PDIR_BASE(vaddr)];
#else
        union x86_32_pdir_entry *pdir_base = &pdir[X86_32_PDIR_BASE(vaddr)];
#       ifndef CONFIG_PSE
        union x86_32_ptable_entry *ptable_base =
            &mem_ptable[X86_32_PDIR_BASE(addr)][X86_32_PTABLE_BASE(vaddr)];
#       endif
#endif

        if(vbase + size != 0) {
            if(vaddr >= vbase + size) {
                break;
            }
        }

#ifdef CONFIG_PAE
        debug(SUBSYS_PAGING, "Mapping 2M page: vaddr = 0x%x, addr = 0x%x, "
              "PDPTE_BASE = %u, PDIR_BASE = %u -- ", vaddr,
              addr, X86_32_PDPTE_BASE(vaddr), X86_32_PDIR_BASE(vaddr));
        mapit(pdpte_base, pdir_base, addr, bitmap);
#else
#       ifdef CONFIG_PSE
        debug(SUBSYS_PAGING, "Mapping 4M page: vaddr = 0x%x, addr = 0x%x, "
              "PDIR_BASE = %u -- ", vaddr,
              addr, X86_32_PDIR_BASE(vaddr));
        mapit(pdir_base, addr, bitmap);
#       else
        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%"PRIxLVADDR", "
              "addr = 0x%"PRIxLVADDR", "
              "PDIR_BASE = %"PRIuLPADDR", PTABLE_BASE = %"PRIuLPADDR" -- ", vaddr,
              addr, X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr));
        mapit(pdir_base, ptable_base, addr, bitmap);
#       endif
#endif

        if(vbase + size == 0) {
            // Bail out if mapped last page of address space to prevent overflow
            if(vaddr == 0xffe00000) {
                break;
            }
        }
    }

    return 0;
}
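The split termination tests guard against 32-bit wraparound: when the mapped region extends to the very top of the address space, vbase + size overflows to 0, so the usual vaddr >= vbase + size comparison never fires and the loop instead breaks after mapping the last page. A minimal sketch of the same trick, assuming 2M pages as in the CONFIG_PAE path:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  0x200000u   /* 2M pages */
#define LAST_PAGE  0xFFE00000u /* topmost 2M page of a 32-bit space */

static void walk_pages(uint32_t vbase, uint32_t size)
{
    for (uint32_t vaddr = vbase;; vaddr += PAGE_SIZE) {
        if (vbase + size != 0 && vaddr >= vbase + size) {
            break;                           /* normal termination */
        }
        printf("page at 0x%08x\n", vaddr);   /* stand-in for mapit() */
        if (vbase + size == 0 && vaddr == LAST_PAGE) {
            break;                           /* wrapped: stop at last page */
        }
    }
}

int main(void)
{
    walk_pages(0xFFC00000u, 0x400000u);      /* ends at 2^32: sum wraps to 0 */
    return 0;
}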
Code Example #6
File: startup_arch.c  Project: XuNazgul/cmpe295A
struct dcb *spawn_app_init(struct x86_core_data *core_data,
                           const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

#ifdef __scc__
    char urpc_frame_base_char[30];
    snprintf(urpc_frame_base_char, sizeof(urpc_frame_base_char),
             "frame=%" PRIuGENPADDR, core_data->urpc_frame_base);
    argv[argc++] = urpc_frame_base_char;
#endif

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name, argc, argv,
                                             0, alloc_phys);

    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
                          core_data->urpc_frame_bits,
                          core_data->urpc_frame_bits, core_data->src_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < MON_URPC_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(MON_URPC_BASE) + i],
                   urpc_ptr + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }

    // elf load the domain
    genvaddr_t entry_point;
    err = elf_load(EM_386, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);
    init_disp_x86_32->disabled_save_area.eip = entry_point;

    return init_dcb;
}
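The argv construction at the top is a plain C pattern: format each argument into its own fixed-size buffer, collect the pointers into an array, and leave one spare slot for the conditionally appended __scc__ argument. A self-contained sketch with made-up values:

#include <stdio.h>

int main(void)
{
    char coreid[10], chanid[30], archid[30];
    snprintf(coreid, sizeof(coreid), "%d", 1);            /* booting core id */
    snprintf(chanid, sizeof(chanid), "chanid=%u", 42u);   /* IPI channel id */
    snprintf(archid, sizeof(archid), "archid=%d", 3);     /* arch id */

    const char *argv[5] = { "monitor", coreid, chanid, archid };
    int argc = 4;                 /* fifth slot reserved for an optional arg */

    for (int i = 0; i < argc; i++) {
        printf("argv[%d] = %s\n", i, argv[i]);
    }
    return 0;
}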
Code Example #7
File: startup_arch.c  Project: XuNazgul/cmpe295A
static struct dcb *spawn_init_common(struct spawn_state *st, const char *name,
                                     int argc, const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Perform arch-independent spawn */
    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(st, name, argc, argv, bootinfo_phys,
                                        ARGS_BASE, alloc_phys, &paramaddr);

    /* Init page tables */
    init_page_tables(st, alloc_phys);

    /* Map dispatcher R/W into VSpace starting at vaddr 0x204000
     * (Starting after Bootinfo pages)*/
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < DISPATCHER_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(DISPATCHER_BASE) + i],
                   mem_to_local_phys(init_dcb->disp) + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }

    struct dispatcher_shared_generic *init_disp =
        get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);

    registers_set_param(&init_disp_x86_32->enabled_save_area, paramaddr);

    // Map IO cap in task cnode
    struct cte *iocap = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_IO);
    err = caps_create_new(ObjType_IO, 0, 0, 0, my_core_id, iocap);
    assert(err_is_ok(err));

    /* Set fields in DCB */
    // Set Vspace
#ifdef CONFIG_PAE
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pdpte);
#else
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pdir);
#endif

    /* Initialize dispatcher */
    init_disp->disabled = true;
    strncpy(init_disp->name, argv[0], DISP_NAME_LEN);

    /* tell init the vspace addr of its dispatcher */
    init_disp->udisp = DISPATCHER_BASE;

    init_disp_x86_32->disabled_save_area.edi = DISPATCHER_BASE;
    init_disp_x86_32->disabled_save_area.fs = 0;
    init_disp_x86_32->disabled_save_area.gs = 0;
    init_disp_x86_32->disabled_save_area.cs = USER_CS;
    init_disp_x86_32->disabled_save_area.ss = USER_SS;
    init_disp_x86_32->disabled_save_area.eflags = USER_EFLAGS;
    
    return init_dcb;
}
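The dispatcher mapping loop is the same page-by-page pattern used for the URPC frame above and the command-line args below: walk the object one BASE_PAGE_SIZE step at a time, filling consecutive PTE slots. A hedged sketch with a simplified, hypothetical PTE layout standing in for paging_x86_32_map():

#include <stddef.h>
#include <stdint.h>

#define BASE_PAGE_SIZE 4096u

/* Hypothetical 32-bit PTE: frame address in the high bits, flags in the low */
static void map_page(uint32_t *pte, uint32_t pa, uint32_t flags)
{
    *pte = (pa & ~(BASE_PAGE_SIZE - 1)) | flags;
}

/* Map an object of `size` bytes at physical `pa` into consecutive slots;
 * size is assumed page-aligned, as DISPATCHER_SIZE is. */
static void map_object(uint32_t *pte, size_t first_slot,
                       uint32_t pa, size_t size, uint32_t flags)
{
    for (size_t i = 0; i < size / BASE_PAGE_SIZE; i++) {
        map_page(&pte[first_slot + i], pa + i * BASE_PAGE_SIZE, flags);
    }
}

int main(void)
{
    uint32_t pte[16] = { 0 };
    map_object(pte, 4, 0x00800000u, 2 * BASE_PAGE_SIZE, 0x3u);  /* P|RW, illustrative */
    return (pte[4] && pte[5]) ? 0 : 1;
}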
Code Example #8
File: startup_arch.c  Project: XuNazgul/cmpe295A
static void init_page_tables(struct spawn_state *st, alloc_phys_func alloc_phys)
{
    /* Allocate memory for init's page tables */
#ifdef CONFIG_PAE
    init_pdpte = (void *)local_phys_to_mem(alloc_phys(X86_32_PDPTE_SIZE
                                           * sizeof(union x86_32_pdpte_entry)));
#endif
    init_pdir = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * sizeof(union x86_32_pdir_entry)));
    init_ptable = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * INIT_PTABLE_SIZE * sizeof(union x86_32_ptable_entry)));

    /* Page table setup */
    /* Initialize init page tables */
    for(size_t j = 0; j < INIT_PDIR_SIZE; j++) {
        paging_x86_32_clear_pdir(&init_pdir[j]);
        for(size_t k = 0; k < INIT_PTABLE_SIZE; k++) {
            paging_x86_32_clear_ptable(&init_ptable[j * X86_32_PTABLE_SIZE + k]);
        }
    }
    /* Map pagetables into pageCN */
    int     pagecn_pagemap = 0;
#ifdef CONFIG_PAE
    // Map PDPTE into first slot in pagecn
    caps_create_new(ObjType_VNode_x86_32_pdpt,
                    mem_to_local_phys((lvaddr_t)init_pdpte),
                    BASE_PAGE_BITS, 0, my_core_id,
                    caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
#endif
    // Map PDIR into successive slots in pagecn
    for(size_t i = 0; i < INIT_PDIR_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_pdir,
                        mem_to_local_phys((lvaddr_t)init_pdir) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Map page tables into successive slots in pagecn
    for(size_t i = 0; i < INIT_PTABLE_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_ptable,
                        mem_to_local_phys((lvaddr_t)init_ptable) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Connect all page tables to page directories.
    // init's memory manager expects page tables within the pagecn to
    // already be connected to the corresponding directories. To avoid
    // unnecessary special cases, we connect them here.
    for(lvaddr_t vaddr = 0; vaddr < X86_32_INIT_SPACE_LIMIT;
        vaddr += BASE_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_pdpte_entry *pdpte_base =
            &init_pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE +
                       X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE *
                         X86_32_PTABLE_SIZE + X86_32_PDIR_BASE(vaddr) *
                         X86_32_PTABLE_SIZE + X86_32_PTABLE_BASE(vaddr)];

        paging_x86_32_map_pdpte(pdpte_base, mem_to_local_phys((lvaddr_t)pdir_base));
#else
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE +
                         X86_32_PTABLE_BASE(vaddr)];
#endif
        paging_x86_32_map_table(pdir_base,
                                mem_to_local_phys((lvaddr_t)ptable_base));
    }

    /* Switch to init's VSpace */
#ifdef CONFIG_PAE
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdpte));
#else
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdir));
#endif

    /***** VSpace available *****/

    /* Map cmdline args R/W into VSpace at ARGS_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < ARGS_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(ARGS_BASE) + i],
                   st->args_page + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }
}
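The PAE subscripts in the connect loop flatten the three-level (PDPT, directory, table) split into one index into the contiguous init_ptable block. A self-contained sketch of that arithmetic, with the macros redefined locally from the standard PAE address split (the Barrelfish definitions are assumed to match):

#include <assert.h>
#include <stdint.h>

#define PTABLE_SIZE      512u                                /* PAE entries per table */
#define PDPTE_BASE(va)   (((uint32_t)(va) >> 30) & 0x3u)     /* bits 31:30 */
#define PDIR_BASE(va)    (((uint32_t)(va) >> 21) & 0x1FFu)   /* bits 29:21 */
#define PTABLE_BASE(va)  (((uint32_t)(va) >> 12) & 0x1FFu)   /* bits 20:12 */

int main(void)
{
    uint32_t vaddr = 0x40201000;
    uint32_t idx = PDPTE_BASE(vaddr) * PTABLE_SIZE * PTABLE_SIZE
                 + PDIR_BASE(vaddr) * PTABLE_SIZE
                 + PTABLE_BASE(vaddr);
    /* The indices concatenate, so the flat index is the virtual page number */
    assert(idx == (vaddr >> 12));
    return 0;
}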