Example #1
/**
 * \brief Reset kernel paging.
 *
 * This function resets the page tables for the kernel's address space,
 * re-mapping physical memory and clearing out all other mappings. Use it
 * only at system bootup!
 *
 * \param paddr  Physical base address of the memory to re-map
 * \param bytes  Size of the memory to re-map, in bytes
 */
void paging_arm_reset(lpaddr_t paddr, size_t bytes)
{
    // make sure kernel pagetable is aligned to 16K after relocation
    aligned_kernel_l1_table = (union arm_l1_entry *)ROUND_UP((uintptr_t)kernel_l1_table, ARM_L1_ALIGN);

    // make sure low l2 pagetable is aligned to 1K after relocation
    aligned_low_l2_table = (union arm_l2_entry *)ROUND_UP((uintptr_t)low_l2_table, ARM_L2_ALIGN);

    // Re-map physical memory
    paging_map_memory((uintptr_t)aligned_kernel_l1_table, paddr, bytes);

    // map first MB at granularity of 4K pages
    uint32_t l2_flags = ARM_L2_SMALL_USR_NONE | ARM_L2_SMALL_CACHEABLE | ARM_L2_SMALL_BUFFERABLE;
    paging_map_user_pages_l1((uintptr_t)aligned_kernel_l1_table, MEMORY_OFFSET,
                             mem_to_local_phys((uintptr_t)aligned_low_l2_table));
    for (lpaddr_t pa = 0; pa < ARM_L1_SECTION_BYTES; pa += BYTES_PER_PAGE) {
        lvaddr_t va = pa + MEMORY_OFFSET;
        paging_set_l2_entry((uintptr_t *)&aligned_low_l2_table[ARM_L2_OFFSET(va)], pa, l2_flags);
    }

    // map high-mem relocated exception vector to corresponding page in low MB
    // core 0: 0xffff0000 -> 0x80000
    // core 1: 0xffff0000 -> 0x81000
    // ...
    paging_map_user_pages_l1((uintptr_t)aligned_kernel_l1_table, ETABLE_ADDR,
            mem_to_local_phys((uintptr_t)aligned_low_l2_table));
    int core_id = hal_get_cpu_id();
    lpaddr_t addr = ETABLE_PHYS_BASE + core_id * BASE_PAGE_SIZE;
    paging_set_l2_entry((uintptr_t *)&aligned_low_l2_table[ARM_L2_OFFSET(ETABLE_ADDR)], addr, l2_flags);

    // Install the new L1 table as the kernel's translation table base (TTBR1)
    cp15_write_ttbr1(mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));

    // Flush stale translations
    cp15_invalidate_tlb();
}
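
For context, here is a minimal sketch of the alignment helpers and constants the example above relies on. These are assumptions, not the actual Barrelfish headers: the values are inferred from the comments (16KB L1 alignment, 1KB L2 alignment, 4KB pages, 1MB sections), and ROUND_UP is the usual mask trick, valid only for power-of-two sizes.

// Sketch only: names match the usage above, values inferred from the comments.
#define ROUND_UP(n, size)     (((n) + (size) - 1) & ~((size) - 1))
#define ARM_L1_ALIGN          0x4000u    // 16KB: ARMv7 L1 table alignment
#define ARM_L2_ALIGN          0x400u     // 1KB: one L2 table
#define BYTES_PER_PAGE        0x1000u    // 4KB small pages
#define ARM_L1_SECTION_BYTES  0x100000u  // 1MB mapped per L1 section entry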
Example #2
/**
 * \brief Modify the flags of an existing page mapping.
 *
 * \param pmap     The pmap object
 * \param vaddr    The virtual address of the mapping to modify
 * \param size     Size in bytes of the region to modify
 * \param flags    New flags for the mapping
 * \param retsize  If non-NULL, filled in with the actual size modified
 */
static errval_t
modify_flags(struct pmap     *pmap,
             genvaddr_t       vaddr,
             size_t           size,
             vregion_flags_t  flags,
             size_t          *retsize)
{
    errval_t err, ret = SYS_ERR_OK;
    struct pmap_arm *pmap_arm = (struct pmap_arm*)pmap;
    size = ROUND_UP(size, BASE_PAGE_SIZE);
    size_t pte_count = size / BASE_PAGE_SIZE;
    genvaddr_t vend = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
        // fast path
        err = do_single_modify_flags(pmap_arm, vaddr, pte_count, false);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
        }
    }
    else { // slow path
        // modify flags on the first (partial) leaf
        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
        err = do_single_modify_flags(pmap_arm, vaddr, c, false);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
        }

        // modify flags on full leaves
        vaddr += c * BASE_PAGE_SIZE;
        while (ARM_L1_OFFSET(vaddr) < ARM_L1_OFFSET(vend)) {
            c = ARM_L2_MAX_ENTRIES;
            err = do_single_modify_flags(pmap_arm, vaddr, c, true);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            }
            vaddr += c * BASE_PAGE_SIZE;
        }

        // modify flags on the remaining part
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(vaddr);
        if (c) {
            err = do_single_modify_flags(pmap_arm, vaddr, c, true);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            }
        }
    }

    if (retsize) {
        *retsize = size;
    }

    return ret;
}
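
The fast-path/slow-path split above (and the same pattern in Example #4 below) follows from the ARMv7 short-descriptor layout: one L1 entry covers one L2 table's worth of pages, so a region decomposes into a partial first leaf, zero or more full leaves, and a partial last leaf. Here is a self-contained sketch of that arithmetic, assuming 4KB pages and 256-entry L2 tables (the standard ARMv7 figures; the real ARM_L2_MAX_ENTRIES may differ):

#include <stdio.h>
#include <stdint.h>

#define BASE_PAGE_SIZE  0x1000u
#define L2_ENTRIES      256u                           // ARMv7 short descriptors
#define L1_BYTES        (L2_ENTRIES * BASE_PAGE_SIZE)  // 1MB per L1 entry
#define L1_OFFSET(va)   ((uint32_t)(va) / L1_BYTES)
#define L2_OFFSET(va)   (((uint32_t)(va) / BASE_PAGE_SIZE) % L2_ENTRIES)

// Print the chunks that [vaddr, vaddr + size) decomposes into.
static void split_region(uint32_t vaddr, uint32_t size)
{
    uint32_t vend = vaddr + size;
    if (L1_OFFSET(vaddr) == L1_OFFSET(vend - 1)) {
        printf("fast path: %u pages in one L2 table\n",
               (unsigned)(size / BASE_PAGE_SIZE));
        return;
    }
    uint32_t c = L2_ENTRIES - L2_OFFSET(vaddr);   // partial first leaf
    printf("first leaf: %u pages\n", (unsigned)c);
    vaddr += c * BASE_PAGE_SIZE;
    while (L1_OFFSET(vaddr) < L1_OFFSET(vend)) {  // full leaves
        printf("full leaf: %u pages\n", (unsigned)L2_ENTRIES);
        vaddr += L1_BYTES;
    }
    c = L2_OFFSET(vend) - L2_OFFSET(vaddr);       // partial last leaf
    if (c) {
        printf("last leaf: %u pages\n", (unsigned)c);
    }
}

int main(void)
{
    split_region(0x000ff000u, 0x3000u);  // 3 pages crossing an L1 boundary
    return 0;
}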
Example #3
/* Map the exception vectors at VECTORS_BASE. */
static void map_vectors(void)
{
    /**
     * Map the L2 table to hold the high vectors mapping.
     */
    union arm_l1_entry *e_l1= &l1_high[ARM_L1_OFFSET(VECTORS_BASE)];
    e_l1->page_table.type= L1_TYPE_PAGE_TABLE_ENTRY;
    e_l1->page_table.base_address= ((uint32_t)l2_vec) >> ARM_L2_TABLE_BITS;

    /**
     * Now install a single small page mapping to cover the vectors.
     *
     * The mapping fields are set exactly as for the kernel's RAM sections -
     * see make_ram_section() for details.
     */
    union arm_l2_entry *e_l2= &l2_vec[ARM_L2_OFFSET(VECTORS_BASE)];
    e_l2->small_page.type= L2_TYPE_SMALL_PAGE;
    e_l2->small_page.tex=        1;  /* TEX=1, C=1, B=1: normal memory,  */
    e_l2->small_page.cacheable=  1;  /* write-back, write-allocate       */
    e_l2->small_page.bufferable= 1;
    e_l2->small_page.not_global= 0;  /* global mapping (not ASID-tagged) */
    e_l2->small_page.shareable=  1;
    e_l2->small_page.ap10=       1;  /* AP[2:0]=001: privileged R/W,     */
    e_l2->small_page.ap2=        0;  /* no user access                   */

    /* The vectors must be at the beginning of a frame. */
    assert((((uint32_t)exception_vectors) & BASE_PAGE_MASK) == 0);
    e_l2->small_page.base_address=
        ((uint32_t)exception_vectors) >> BASE_PAGE_BITS;
}
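
The assignments above assume a union wrapping the ARMv7-A short-descriptor small-page format. A sketch of the layout implied by those field names follows; the real union arm_l2_entry may differ, and C bitfield allocation order is implementation-defined (low bits first is what GCC does on little-endian ARM).

#include <stdint.h>

// Sketch of an ARMv7-A short-descriptor small-page entry, low bits first.
union arm_l2_entry_sketch {
    uint32_t raw;
    struct {
        uint32_t type         : 2;   // bits [1:0], 0b1x = small page
        uint32_t bufferable   : 1;   // B
        uint32_t cacheable    : 1;   // C
        uint32_t ap10         : 2;   // AP[1:0]
        uint32_t tex          : 3;   // TEX[2:0]
        uint32_t ap2          : 1;   // AP[2]
        uint32_t shareable    : 1;   // S
        uint32_t not_global   : 1;   // nG
        uint32_t base_address : 20;  // physical frame number, bits [31:12]
    } small_page;
};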
Example #4
/**
 * \brief Map a frame into the pmap, splitting across leaf page tables as needed.
 *
 * \param pmap     The pmap object
 * \param vaddr    The virtual address at which to map
 * \param frame    The frame capability to map
 * \param offset   Offset into the frame at which the mapping starts
 * \param size     Size of the mapping in bytes
 * \param flags    Flags for the mapping
 * \param retoff   If non-NULL, filled in with the actual offset used
 * \param retsize  If non-NULL, filled in with the actual size mapped
 */
static errval_t do_map(struct pmap_arm *pmap, genvaddr_t vaddr,
                       struct capref frame, size_t offset, size_t size,
                       vregion_flags_t flags, size_t *retoff, size_t *retsize)
{
    errval_t err;

    size = ROUND_UP(size, BASE_PAGE_SIZE);
    size_t pte_count = DIVIDE_ROUND_UP(size, BASE_PAGE_SIZE);
    genvaddr_t vend = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
        // fast path
        err = do_single_map(pmap, vaddr, vend, frame, offset, pte_count, flags);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "[do_map] in fast path");
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }
    } else { // multiple leaf page tables
        // first leaf
        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
        genvaddr_t temp_end = vaddr + c * BASE_PAGE_SIZE;
        err = do_single_map(pmap, vaddr, temp_end, frame, offset, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }

        // map full leaves
        while (ARM_L1_OFFSET(temp_end) < ARM_L1_OFFSET(vend)) {
            // advance loop variables to the next leaf table
            vaddr = temp_end;
            temp_end = vaddr + ARM_L2_MAX_ENTRIES * BASE_PAGE_SIZE;
            offset += c * BASE_PAGE_SIZE;
            c = ARM_L2_MAX_ENTRIES;
            // each mapping needs its own copy of the frame capability
            struct capref next;
            err = slot_alloc(&next);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
            err = cap_copy(next, frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
            frame = next;

            // do mapping
            err = do_single_map(pmap, vaddr, temp_end, frame, offset, ARM_L2_MAX_ENTRIES, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
        }

        // map remaining part
        offset += c * BASE_PAGE_SIZE;
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(temp_end);
        if (c) {
            // each mapping needs its own copy of the frame capability
            struct capref next;
            err = slot_alloc(&next);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
            err = cap_copy(next, frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }

            // do mapping
            err = do_single_map(pmap, temp_end, vend, next, offset, c, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
        }
    }
    if (retoff) {
        *retoff = offset;
    }
    if (retsize) {
        *retsize = size;
    }
    return SYS_ERR_OK;
}
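
To make the offset bookkeeping in the slow path concrete, here is a worked trace with hypothetical numbers, again assuming 4KB pages and 256-entry L2 tables:

/*
 * do_map(pmap, vaddr = 0x000fe000, frame, offset = 0, size = 0x104000, ...)
 * vend = 0x00202000, pte_count = 260, ARM_L2_OFFSET(vaddr) = 254
 *
 *   first leaf:  c = 256 - 254 = 2 pages  -> [0x0fe000, 0x100000), offset 0
 *   full leaf:   c = 256 pages            -> [0x100000, 0x200000), offset 0x2000
 *   remaining:   c = 2 pages              -> [0x200000, 0x202000), offset 0x102000
 *
 * The second and third do_single_map() calls each go through a fresh
 * cap_copy() of the frame, since a single capability copy can back only
 * one mapping.
 */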