Example #1
File: pmap_arch.c Project: Karamax/arrakis
/**
 * \brief Modify the flags of an existing page mapping
 *
 * \param pmap     The pmap object
 * \param vaddr    The virtual address of the mapping to modify
 * \param size     The size in bytes of the region to modify
 * \param flags    New flags for the mapping
 * \param retsize  If non-NULL, filled in with the actual size modified
 */
static errval_t
modify_flags(struct pmap     *pmap,
             genvaddr_t       vaddr,
             size_t           size,
             vregion_flags_t  flags,
             size_t          *retsize)
{
    errval_t err, ret = SYS_ERR_OK;
    struct pmap_arm *pmap_arm = (struct pmap_arm*)pmap;
    size = ROUND_UP(size, BASE_PAGE_SIZE);
    size_t pte_count = size / BASE_PAGE_SIZE;
    genvaddr_t vend = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
        // fast path
        err = do_single_modify_flags(pmap_arm, vaddr, pte_count, false);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
        }
    }
    else { // slow path
        // modify flags on the first (partial) leaf page table
        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
        err = do_single_modify_flags(pmap_arm, vaddr, c, false);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
        }

        // modify flags on the full leaf page tables
        vaddr += c * BASE_PAGE_SIZE;
        while (ARM_L1_OFFSET(vaddr) < ARM_L1_OFFSET(vend)) {
            c = ARM_L2_MAX_ENTRIES;
            err = do_single_modify_flags(pmap_arm, vaddr, c, true);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            }
            vaddr += c * BASE_PAGE_SIZE;
        }

        // modify flags on the remaining part
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(vaddr);
        if (c) {
            err = do_single_modify_flags(pmap_arm, vaddr, c, true);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            }
        }
    }

    if (retsize) {
        *retsize = size;
    }

    return ret;
}
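
The split above is made on 1 MB L1 section boundaries: a region that stays inside one leaf (L2) page table takes the fast path, anything larger is processed leaf by leaf. The following standalone sketch illustrates that check; the constants and index macros mirror the usual ARMv7 short-descriptor layout and are illustrative assumptions, not copied from the project.

#include <stdint.h>
#include <stdio.h>

/* Illustrative assumptions: 4 KB pages, 256-entry L2 tables, and the
 * conventional ARMv7 short-descriptor index macros. */
#define BASE_PAGE_SIZE      0x1000u
#define ARM_L2_MAX_ENTRIES  256u
#define ARM_L1_OFFSET(va)   (((uint32_t)(va)) >> 20)
#define ARM_L2_OFFSET(va)   ((((uint32_t)(va)) >> 12) & 0xffu)

int main(void)
{
    uint32_t vaddr = 0x400ff000u;           /* hypothetical region start */
    uint32_t size  = 8 * BASE_PAGE_SIZE;    /* crosses one L1 boundary   */
    uint32_t vend  = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend - 1)) {
        /* Whole region sits in one leaf table: a single call suffices. */
        printf("fast path: %u pages\n", size / BASE_PAGE_SIZE);
    } else {
        /* The first chunk runs to the end of the current leaf table. */
        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
        printf("slow path: %u page(s) in the first leaf\n", c);
    }
    return 0;
}

With vaddr one page below an L1 boundary, the first chunk is a single page and the remaining seven pages spill into the next leaf table.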
Example #2
/* Map the exception vectors at VECTORS_BASE. */
static void map_vectors(void)
{
    /**
     * Map the L2 table to hold the high vectors mapping.
     */
    union arm_l1_entry *e_l1= &l1_high[ARM_L1_OFFSET(VECTORS_BASE)];
    e_l1->page_table.type= L1_TYPE_PAGE_TABLE_ENTRY;
    e_l1->page_table.base_address= ((uint32_t)l2_vec) >> ARM_L2_TABLE_BITS;

    /**
     * Now install a single small page mapping to cover the vectors.
     *
     * The mapping fields are set exactly as for the kernel's RAM sections -
     * see make_ram_section() for details.
     */
    union arm_l2_entry *e_l2= &l2_vec[ARM_L2_OFFSET(VECTORS_BASE)];
    e_l2->small_page.type= L2_TYPE_SMALL_PAGE;
    e_l2->small_page.tex=        1;
    e_l2->small_page.cacheable=  1;
    e_l2->small_page.bufferable= 1;
    e_l2->small_page.not_global= 0;
    e_l2->small_page.shareable=  1;
    e_l2->small_page.ap10=       1;
    e_l2->small_page.ap2=        0;

    /* The vectors must be at the beginning of a frame. */
    assert((((uint32_t)exception_vectors) & BASE_PAGE_MASK) == 0);
    e_l2->small_page.base_address=
        ((uint32_t)exception_vectors) >> BASE_PAGE_BITS;
}
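
As a quick check of the index arithmetic, a hedged sketch: assuming VECTORS_BASE is the ARM high-vectors address 0xFFFF0000 (an assumption, not stated in the listing), the mapping lands in the very last L1 slot and in entry 0xf0 of l2_vec. The index macros below are likewise illustrative.

#include <assert.h>
#include <stdint.h>

#define VECTORS_BASE       0xFFFF0000u   /* assumed ARM high-vectors base */
#define ARM_L1_OFFSET(va)  (((uint32_t)(va)) >> 20)
#define ARM_L2_OFFSET(va)  ((((uint32_t)(va)) >> 12) & 0xffu)

int main(void)
{
    assert(ARM_L1_OFFSET(VECTORS_BASE) == 0xfff);  /* last L1 entry       */
    assert(ARM_L2_OFFSET(VECTORS_BASE) == 0xf0);   /* entry 240 of l2_vec */
    return 0;
}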
Example #3
static void
paging_write_l1_entry(uintptr_t ttbase, lvaddr_t va, union arm_l1_entry l1)
{
    union arm_l1_entry *l1_table;
    if (ttbase == 0) {
        if(va < MEMORY_OFFSET)
            ttbase = cp15_read_ttbr0() + MEMORY_OFFSET;
        else
            ttbase = cp15_read_ttbr1() + MEMORY_OFFSET;
    }
    l1_table = (union arm_l1_entry *) ttbase;
    l1_table[ARM_L1_OFFSET(va)] = l1;
}
Example #4
/**
 * \brief Map a device into the kernel's address space.  
 * 
 * \param device_base is the physical address of the device
 * \param device_size is the number of bytes of physical address space
 * the device occupies. 
 *
 * \return the kernel virtual address of the mapped device, or panic. 
 */
lvaddr_t paging_map_device(lpaddr_t dev_base, size_t dev_size)
{
    // We map all hardware devices in the kernel using sections in the
    // top quarter (0xC0000000-0xFE000000) of the address space, just
    // below the exception vectors.  
    // 
    // It makes sense to use sections since (1) we don't map many
    // devices in the CPU driver anyway, and (2) if we did, it might
    // save a wee bit of TLB space. 
    //

    // First, we make sure that the device fits into a single
    // section. 
    if (ARM_L1_SECTION_NUMBER(dev_base) != ARM_L1_SECTION_NUMBER(dev_base+dev_size-1)) {
        panic("Attempt to map device spanning >1 section 0x%"PRIxLPADDR"+0x%x\n",
              dev_base, dev_size );
    }
    
    // Now, walk down the page table looking for either (a) an
    // existing mapping, in which case return the address the device
    // is already mapped to, or (b) an invalid mapping, in which case
    // map it.
    uint32_t dev_section = ARM_L1_SECTION_NUMBER(dev_base);
    uint32_t dev_offset  = ARM_L1_SECTION_OFFSET(dev_base);
    lvaddr_t dev_virt    = 0;
    
    for( size_t i = ARM_L1_OFFSET( DEVICE_OFFSET - 1); i > ARM_L1_MAX_ENTRIES / 4 * 3; i-- ) {

        // Work out the virtual address we're looking at
        dev_virt = (lvaddr_t)(i << ARM_L1_SECTION_BITS);

        // If we already have a mapping for that address, return it. 
        if ( L1_TYPE(l1_high[i].raw) == L1_TYPE_SECTION_ENTRY &&
             l1_high[i].section.base_address == dev_section ) {
            return dev_virt + dev_offset;
        }

        // Otherwise, if it's free, map it. 
        if ( L1_TYPE(l1_high[i].raw) == L1_TYPE_INVALID_ENTRY ) {
            map_kernel_section_hi(dev_virt, make_dev_section(dev_base));
            cp15_invalidate_i_and_d_caches_fast();
            cp15_invalidate_tlb();
            return dev_virt + dev_offset;
        } 
    }
    // We're all out of section entries :-(
    panic("Ran out of section entries to map a kernel device");
}
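
A hedged usage sketch follows; the UART base address and size are hypothetical illustration values, and only paging_map_device() itself is taken from the listing above. The fragment assumes it sits inside the CPU driver with the kernel's paging headers in scope.

/* Map a hypothetical memory-mapped UART and return its kernel address. */
static lvaddr_t map_uart_example(void)
{
    lpaddr_t uart_phys = 0x10009000;   /* hypothetical device base          */
    size_t   uart_size = 0x1000;       /* fits well inside one 1 MB section */

    /* Returns an address in the section-mapped device window; the
     * in-section offset of uart_phys is preserved by the function. */
    return paging_map_device(uart_phys, uart_size);
}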
Example #5
File: pmap_arch.c Project: Karamax/arrakis
static errval_t do_map(struct pmap_arm *pmap, genvaddr_t vaddr,
                       struct capref frame, size_t offset, size_t size,
                       vregion_flags_t flags, size_t *retoff, size_t *retsize)
{
    errval_t err;

    size = ROUND_UP(size, BASE_PAGE_SIZE);
    size_t pte_count = DIVIDE_ROUND_UP(size, BASE_PAGE_SIZE);
    genvaddr_t vend = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
        // fast path
        err = do_single_map(pmap, vaddr, vend, frame, offset, pte_count, flags);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "[do_map] in fast path");
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }
    } else { // multiple leaf page tables
        // first leaf
        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
        genvaddr_t temp_end = vaddr + c * BASE_PAGE_SIZE;
        err = do_single_map(pmap, vaddr, temp_end, frame, offset, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }

        // map full leaves
        while (ARM_L1_OFFSET(temp_end) < ARM_L1_OFFSET(vend)) { // update vars
            vaddr = temp_end;
            temp_end = vaddr + ARM_L2_MAX_ENTRIES * BASE_PAGE_SIZE;
            offset += c * BASE_PAGE_SIZE;
            c = ARM_L2_MAX_ENTRIES;
            // copy cap
            struct capref next;
            err = slot_alloc(&next);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
            err = cap_copy(next, frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
            frame = next;

            // do mapping
            err = do_single_map(pmap, vaddr, temp_end, frame, offset, ARM_L2_MAX_ENTRIES, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
        }

        // map remaining part
        offset += c * BASE_PAGE_SIZE;
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(temp_end);
        if (c) {
            // copy cap
            struct capref next;
            err = slot_alloc(&next);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
            err = cap_copy(next, frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }

            // do mapping
            err = do_single_map(pmap, temp_end, vend, next, offset, c, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
        }
    }
    if (retoff) {
        *retoff = offset;
    }
    if (retsize) {
        *retsize = size;
    }
    //has_vnode_debug = false;
    return SYS_ERR_OK;
#if 0
    errval_t err;
    uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);

    for (size_t i = offset; i < offset + size; i += BASE_PAGE_SIZE) {

        vaddr += BASE_PAGE_SIZE;
    }

    if (retoff) {
        *retoff = offset;
    }
    if (retsize) {
        *retsize = size;
    }
    return SYS_ERR_OK;
#endif
}
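
Because every leaf (L2) page table gets its own do_single_map() call, and each call after the first maps a freshly copied frame capability, the number of cap_copy() invocations equals the number of 1 MB L1 slots the region touches minus one. A small sketch of that count; the index macro is an illustrative assumption, as before.

#include <stdint.h>
#include <stdio.h>

/* Assumed ARMv7 short-descriptor L1 index: one slot per 1 MB section. */
#define ARM_L1_OFFSET(va)  (((uint32_t)(va)) >> 20)

/* Leaf page tables a page-aligned region touches; do_map() makes one
 * cap_copy() for every leaf after the first. */
static unsigned leaves_touched(uint32_t vaddr, uint32_t size)
{
    return ARM_L1_OFFSET(vaddr + size - 1) - ARM_L1_OFFSET(vaddr) + 1;
}

int main(void)
{
    /* A hypothetical 3 MB region starting one page below an L1 boundary
     * touches 4 leaves, so do_map() would copy the frame cap 3 times. */
    printf("%u leaves\n", leaves_touched(0x400ff000u, 0x300000u));
    return 0;
}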
Example #6
static void map_kernel_section_hi(lvaddr_t va, union arm_l1_entry l1)
{
    assert( va >= MEMORY_OFFSET );
    l1_high[ARM_L1_OFFSET(va)] = l1;
}