Example #1
void arm_mmu_init(void)
{
    /* unmap the initial mappings that are marked temporary */
    struct mmu_initial_mapping *map = mmu_initial_mappings;
    while (map->size > 0) {
        if (map->flags & MMU_INITIAL_MAPPING_TEMPORARY) {
            vaddr_t va = map->virt;
            size_t size = map->size;

            DEBUG_ASSERT(IS_SECTION_ALIGNED(size));

            while (size > 0) {
                arm_mmu_unmap_l1_entry(arm_kernel_translation_table, va / SECTION_SIZE);
                va += MB;
                size -= MB;
            }
        }
        map++;
    }
    arm_after_invalidate_tlb_barrier();

#if KERNEL_ASPACE_BASE != 0
    /* bounce the ttbr over to ttbr1 and leave 0 unmapped */
    uint32_t n = __builtin_clz(KERNEL_ASPACE_BASE) + 1;
    DEBUG_ASSERT(n <= 7);

    uint32_t ttbcr = (1<<4) | n; /* disable TTBR0 table walks and set the split between TTBR0 and TTBR1 */

    arm_write_ttbr1(arm_read_ttbr0());
    ISB;
    arm_write_ttbcr(ttbcr);
    ISB;
    arm_write_ttbr0(0);
    ISB;
#endif
}
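
Under the ARMv7 short-descriptor format, TTBCR.N makes TTBR0 translate virtual addresses below 2^(32-N) and TTBR1 everything at or above that boundary, so __builtin_clz(KERNEL_ASPACE_BASE) + 1 yields the smallest N whose boundary does not exceed the kernel base. A minimal standalone sketch of that arithmetic (the KERNEL_ASPACE_BASE value is assumed here purely for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define KERNEL_ASPACE_BASE 0x80000000u /* assumed value for this sketch */

int main(void)
{
    /* same computation as arm_mmu_init above */
    uint32_t n = __builtin_clz(KERNEL_ASPACE_BASE) + 1;
    assert(n <= 7); /* TTBCR.N is a 3-bit field */

    /* TTBR0 covers [0, 2^(32-N)); TTBR1 covers the rest */
    uint32_t boundary = 1u << (32 - n);
    printf("N = %u, TTBR0 covers [0, 0x%08x)\n", n, boundary);
    return 0;
}

With a kernel base of 0x80000000 this prints N = 1, i.e. the low 2GB walk through TTBR0 (disabled above via PD0) and the kernel half through TTBR1.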
Example #2
int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count)
{
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));

    if (!is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    if (!IS_PAGE_ALIGNED(vaddr))
        return ERR_INVALID_ARGS;

    LTRACEF("vaddr 0x%lx count %u\n", vaddr, count);

    int unmapped = 0;
    while (count > 0) {
        uint l1_index = vaddr / SECTION_SIZE;
        uint32_t tt_entry = aspace->tt_virt[l1_index];

        switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
            case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
                /* this top-level (L1) entry is not mapped; move on to the next one */
                uint page_cnt = MIN((SECTION_SIZE - (vaddr % SECTION_SIZE)) / PAGE_SIZE, count);
                vaddr += page_cnt * PAGE_SIZE;
                count -= page_cnt;
                break;
            }
            case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
                if (IS_SECTION_ALIGNED(vaddr) && count >= SECTION_SIZE / PAGE_SIZE) {
                    /* we're asked to remove at least all of this section, so just zero it out */
                    // XXX test for supersection
                    arm_mmu_unmap_section(aspace, vaddr);

                    vaddr += SECTION_SIZE;
                    count -= SECTION_SIZE / PAGE_SIZE;
                    unmapped += SECTION_SIZE / PAGE_SIZE;
                } else {
                    // XXX handle unmapping just part of a section
                    // will need to convert to a L2 table and then unmap the parts we are asked to
                    PANIC_UNIMPLEMENTED;
                }
                break;
            case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
                uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                uint page_idx = (vaddr % SECTION_SIZE) / PAGE_SIZE;
                uint page_cnt = MIN((SECTION_SIZE / PAGE_SIZE) - page_idx, count);

                /* unmap page run */
                for (uint i = 0; i < page_cnt; i++) {
                    l2_table[page_idx++] = 0;
                }
                DSB;

                /* invalidate tlb */
                for (uint i = 0; i < page_cnt; i++) {
                    arm_invalidate_tlb_mva_no_barrier(vaddr);
                    vaddr += PAGE_SIZE;
                }
                count -= page_cnt;
                unmapped += page_cnt;

                /*
                 * Check whether all pages covered by this L1 entry are now
                 * unmapped. We only need to check the entries we did not
                 * clear above, starting from page_idx and wrapping around
                 * the section.
                 */
                page_cnt = (SECTION_SIZE / PAGE_SIZE) - page_cnt;
                while (page_cnt) {
                    if (page_idx == (SECTION_SIZE / PAGE_SIZE))
                        page_idx = 0;
                    if (l2_table[page_idx++])
                        break;
                    page_cnt--;
                }
                if (!page_cnt) {
                    /* we can kill l1 entry */
                    arm_mmu_unmap_l1_entry(aspace->tt_virt, l1_index);

                    /* try to free l2 page itself */
                    put_l2_table(aspace, l1_index, MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                }
                break;
            }

            default:
                // XXX supersections and other descriptor types not implemented
                PANIC_UNIMPLEMENTED;
        }
    }
    arm_after_invalidate_tlb_barrier();
    return unmapped;
}
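
Note that arch_mmu_unmap returns the number of pages actually unmapped and silently skips holes in the range, so a caller that wants strict accounting must compare the return value against the requested count. A hedged usage sketch (the wrapper name and the choice of ERR_NOT_FOUND are illustrative, not part of the API shown above):

/* Illustrative wrapper; not part of the lk API shown above. */
static status_t unmap_range_strict(arch_aspace_t *aspace, vaddr_t va, size_t len)
{
    DEBUG_ASSERT(IS_PAGE_ALIGNED(va));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(len));

    int ret = arch_mmu_unmap(aspace, va, len / PAGE_SIZE);
    if (ret < 0)
        return ret; /* ERR_OUT_OF_RANGE or ERR_INVALID_ARGS from above */

    /* holes in the range are skipped, not counted */
    return ((uint)ret == len / PAGE_SIZE) ? NO_ERROR : ERR_NOT_FOUND;
}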
Example #3
static void arm_mmu_unmap_section(arch_aspace_t *aspace, addr_t vaddr)
{
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr));
    arm_mmu_unmap_l1_entry(aspace->tt_virt, vaddr / SECTION_SIZE);
}
Example #4
File: mmu.c Project: taphier/lk
static void arm_mmu_unmap_section(addr_t vaddr)
{
    DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr));
    arm_mmu_unmap_l1_entry(vaddr / SECTION_SIZE);
}
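
The helper arm_mmu_unmap_l1_entry itself is not among these examples. A minimal sketch of what it plausibly does, matching the two-argument signature used in Examples #1 through #3 and consistent with its callers (zero the L1 descriptor, publish the write with DSB, queue a TLB invalidate, and leave the final barrier to arm_after_invalidate_tlb_barrier()); the entry-count bound is an assumption:

/* Sketch only; not necessarily the project's actual implementation. */
static void arm_mmu_unmap_l1_entry(uint32_t *translation_table, uint32_t index)
{
    DEBUG_ASSERT(translation_table);
    DEBUG_ASSERT(index < 4096); /* 4096 L1 entries x 1MB sections = 4GB */

    translation_table[index] = 0;   /* mark the descriptor invalid */
    DSB;                            /* make the update visible to the table walker */

    /* queue the invalidate; callers issue arm_after_invalidate_tlb_barrier() */
    arm_invalidate_tlb_mva_no_barrier((vaddr_t)index * SECTION_SIZE);
}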