Example #1
static pte_t *arm64_mmu_get_page_table(vaddr_t index, uint page_size_shift, pte_t *page_table)
{
    pte_t pte;
    paddr_t paddr;
    void *vaddr;
    int ret;

    pte = page_table[index];
    switch (pte & MMU_PTE_DESCRIPTOR_MASK) {
    case MMU_PTE_DESCRIPTOR_INVALID:
        ret = alloc_page_table(&paddr, page_size_shift);
        if (ret) {
            TRACEF("failed to allocate page table\n");
            return NULL;
        }
        vaddr = paddr_to_kvaddr(paddr);
        LTRACEF("allocated page table, vaddr %p, paddr 0x%lx\n", vaddr, paddr);
        memset(vaddr, MMU_PTE_DESCRIPTOR_INVALID, 1U << page_size_shift);
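        /* make sure the zeroed table is visible before the entry that points at it */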
        __asm__ volatile("dmb ishst" ::: "memory");
        pte = paddr | MMU_PTE_L012_DESCRIPTOR_TABLE;
        page_table[index] = pte;
        LTRACEF("pte %p[0x%lx] = 0x%llx\n", page_table, index, pte);
        return vaddr;

    case MMU_PTE_L012_DESCRIPTOR_TABLE:
        paddr = pte & MMU_PTE_OUTPUT_ADDR_MASK;
        LTRACEF("found page table 0x%lx\n", paddr);
        return paddr_to_kvaddr(paddr);

    case MMU_PTE_L012_DESCRIPTOR_BLOCK:
        return NULL;

    default:
        PANIC_UNIMPLEMENTED;
    }
}
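
Every example in this set leans on paddr_to_kvaddr() to turn a physical address back into a dereferenceable kernel pointer. As a rough sketch of what such a helper does in an LK-style kernel with a physically contiguous, kernel-mapped arena (phys_arena_t and its field names are hypothetical, for illustration only):

/* minimal sketch, assuming one kernel-mapped physical arena;
 * phys_arena_t and its fields are hypothetical names */
typedef struct {
    paddr_t base;    /* first physical address covered by the arena */
    size_t  size;    /* arena length in bytes */
    void   *kvaddr;  /* kernel virtual address the arena is mapped at */
} phys_arena_t;

static phys_arena_t arena;

void *paddr_to_kvaddr(paddr_t pa)
{
    /* only addresses inside the mapped arena can be translated */
    if (pa < arena.base || pa - arena.base >= arena.size)
        return NULL;
    return (char *)arena.kvaddr + (pa - arena.base);
}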
Example #2
static int cmd_vm(int argc, const cmd_args *argv)
{
    if (argc < 2) {
notenoughargs:
        printf("not enough arguments\n");
usage:
        printf("usage:\n");
        printf("%s phys2virt <address>\n", argv[0].str);
        printf("%s virt2phys <address>\n", argv[0].str);
        printf("%s map <phys> <virt> <count> <flags>\n", argv[0].str);
        printf("%s unmap <virt> <count>\n", argv[0].str);
        return ERR_GENERIC;
    }

    if (!strcmp(argv[1].str, "phys2virt")) {
        if (argc < 3) goto notenoughargs;

        void *ptr = paddr_to_kvaddr(argv[2].u);
        printf("paddr_to_kvaddr returns %p\n", ptr);
    } else if (!strcmp(argv[1].str, "virt2phys")) {
        if (argc < 3) goto notenoughargs;

        paddr_t pa;
        uint flags;
        status_t err = arch_mmu_query(argv[2].u, &pa, &flags);
        printf("arch_mmu_query returns %d\n", err);
        if (err >= 0) {
            printf("\tpa 0x%lx, flags 0x%x\n", pa, flags);
        }
    } else if (!strcmp(argv[1].str, "map")) {
        if (argc < 6) goto notenoughargs;

        int err = arch_mmu_map(argv[3].u, argv[2].u, argv[4].u, argv[5].u);
        printf("arch_mmu_map returns %d\n", err);
    } else if (!strcmp(argv[1].str, "unmap")) {
        if (argc < 4) goto notenoughargs;

        int err = arch_mmu_unmap(argv[2].u, argv[3].u);
        printf("arch_mmu_unmap returns %d\n", err);
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return NO_ERROR;
}
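
For context, a console command like cmd_vm is normally registered with LK's STATIC_COMMAND macros so the debug shell can find it; a sketch of the registration (the help string is invented here):

STATIC_COMMAND_START
STATIC_COMMAND("vm", "vm commands: phys2virt, virt2phys, map, unmap", &cmd_vm)
STATIC_COMMAND_END(vm);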
Example #3
static void arm64_mmu_unmap_pt(vaddr_t vaddr, vaddr_t vaddr_rel,
                               size_t size,
                               uint index_shift, uint page_size_shift,
                               pte_t *page_table, uint asid)
{
    pte_t *next_page_table;
    vaddr_t index;
    size_t chunk_size;
    vaddr_t vaddr_rem;
    vaddr_t block_size;
    vaddr_t block_mask;
    pte_t pte;
    paddr_t page_table_paddr;

    LTRACEF("vaddr 0x%lx, vaddr_rel 0x%lx, size 0x%lx, index shift %d, page_size_shift %d, page_table %p\n",
            vaddr, vaddr_rel, size, index_shift, page_size_shift, page_table);

    while (size) {
        block_size = 1UL << index_shift;
        block_mask = block_size - 1;
        vaddr_rem = vaddr_rel & block_mask;
        chunk_size = MIN(size, block_size - vaddr_rem);
        index = vaddr_rel >> index_shift;

        pte = page_table[index];

        if (index_shift > page_size_shift &&
            (pte & MMU_PTE_DESCRIPTOR_MASK) == MMU_PTE_L012_DESCRIPTOR_TABLE) {
            page_table_paddr = pte & MMU_PTE_OUTPUT_ADDR_MASK;
            next_page_table = paddr_to_kvaddr(page_table_paddr);
            arm64_mmu_unmap_pt(vaddr, vaddr_rem, chunk_size,
                               index_shift - (page_size_shift - 3),
                               page_size_shift,
                               next_page_table, asid);
            if (chunk_size == block_size ||
                page_table_is_clear(next_page_table, page_size_shift)) {
                LTRACEF("pte %p[0x%lx] = 0 (was page table)\n", page_table, index);
                page_table[index] = MMU_PTE_DESCRIPTOR_INVALID;
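                /* order the entry clear ahead of freeing the page table */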
                __asm__ volatile("dmb ishst" ::: "memory");
                free_page_table(next_page_table, page_table_paddr, page_size_shift);
            }
        } else if (pte) {
            /* leaf entry: clear it and invalidate the TLB for this address.
             * The original snippet is truncated here; the tail below is a
             * reconstruction following the usual LK pattern (ARM64_TLBI and
             * MMU_ARM64_GLOBAL_ASID assumed from the arm64 arch headers). */
            LTRACEF("pte %p[0x%lx] = 0\n", page_table, index);
            page_table[index] = MMU_PTE_DESCRIPTOR_INVALID;
            __asm__ volatile("dmb ishst" ::: "memory");
            if (asid == MMU_ARM64_GLOBAL_ASID)
                ARM64_TLBI(vaae1is, vaddr >> 12);
            else
                ARM64_TLBI(vae1is, vaddr >> 12 | (vaddr_t)asid << 48);
        }

        vaddr += chunk_size;
        vaddr_rel += chunk_size;
        size -= chunk_size;
    }
}
Example #4
static status_t get_l2_table(arch_aspace_t *aspace, uint32_t l1_index, paddr_t *ppa)
{
    status_t ret;
    paddr_t pa;
    uint32_t tt_entry;

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(ppa);

    /* lookup an existing l2 pagetable */
    for (uint i = 0; i < L1E_PER_PAGE; i++) {
        tt_entry = aspace->tt_virt[ROUNDDOWN(l1_index, L1E_PER_PAGE) + i];
        if ((tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK)
                == MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE) {
            *ppa = (paddr_t)ROUNDDOWN(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry), PAGE_SIZE)
                   + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));
            return NO_ERROR;
        }
    }

    /* not found: allocate it */
    uint32_t *l2_va = pmm_alloc_kpages(1, &aspace->pt_page_list);
    if (!l2_va)
        return ERR_NO_MEMORY;

    /* wipe it clean to set no access */
    memset(l2_va, 0, PAGE_SIZE);

    /* get physical address */
    ret = arm_vtop((vaddr_t)l2_va, &pa);
    ASSERT(!ret);
    ASSERT(paddr_to_kvaddr(pa));

    DEBUG_ASSERT(IS_PAGE_ALIGNED((vaddr_t)l2_va));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));

    *ppa = pa + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));

    LTRACEF("allocated pagetable at %p, pa 0x%lx, pa 0x%lx\n", l2_va, pa, *ppa);
    return NO_ERROR;
}
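
The sub-table offset math in get_l2_table follows from the ARMv7 short-descriptor sizes: an L2 table is 256 four-byte entries (1KB), so one 4KB page packs the L2 tables for L1E_PER_PAGE == 4 consecutive L1 entries. A compile-time check of that assumption (L2_ENTRIES_PER_TABLE is a name introduced here):

/* 256 entries x 4 bytes x 4 sub-tables == 4KB (values assumed) */
#define L2_ENTRIES_PER_TABLE 256U
STATIC_ASSERT(L1E_PER_PAGE * L2_ENTRIES_PER_TABLE * sizeof(uint32_t) == PAGE_SIZE);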
Example #5
status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags)
{
    uint index;
    uint index_shift;
    pte_t pte;
    pte_t pte_addr;
    uint descriptor_type;
    pte_t *page_table;
    vaddr_t kernel_base = ~0UL << MMU_KERNEL_SIZE_SHIFT;
    vaddr_t vaddr_rem;

    if (vaddr < kernel_base) {
        TRACEF("vaddr 0x%lx < base 0x%lx\n", vaddr, kernel_base);
        return ERR_INVALID_ARGS;
    }

    index_shift = MMU_KERNEL_TOP_SHIFT;
    page_table = arm64_kernel_translation_table;

    vaddr_rem = vaddr - kernel_base;
    index = vaddr_rem >> index_shift;
    ASSERT(index < MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP);

    while (true) {
        index = vaddr_rem >> index_shift;
        vaddr_rem -= (vaddr_t)index << index_shift;
        pte = page_table[index];
        descriptor_type = pte & MMU_PTE_DESCRIPTOR_MASK;
        pte_addr = pte & MMU_PTE_OUTPUT_ADDR_MASK;

        LTRACEF("va 0x%lx, index %d, index_shift %d, rem 0x%lx, pte 0x%llx\n",
                vaddr, index, index_shift, vaddr_rem, pte);

        if (descriptor_type == MMU_PTE_DESCRIPTOR_INVALID)
            return ERR_NOT_FOUND;

        if (descriptor_type == ((index_shift > MMU_KERNEL_PAGE_SIZE_SHIFT) ?
                                 MMU_PTE_L012_DESCRIPTOR_BLOCK :
                                 MMU_PTE_L3_DESCRIPTOR_PAGE)) {
            break;
        }

        if (index_shift <= MMU_KERNEL_PAGE_SIZE_SHIFT ||
            descriptor_type != MMU_PTE_L012_DESCRIPTOR_TABLE) {
            PANIC_UNIMPLEMENTED;
        }

        page_table = paddr_to_kvaddr(pte_addr);
        index_shift -= MMU_KERNEL_PAGE_SIZE_SHIFT - 3;
    }

    if (paddr)
        *paddr = pte_addr + vaddr_rem;
    if (flags) {
        *flags = 0;
        switch (pte & MMU_PTE_ATTR_ATTR_INDEX_MASK) {
            case MMU_PTE_ATTR_STRONGLY_ORDERED:
                *flags |= ARCH_MMU_FLAG_UNCACHED;
                break;
            case MMU_PTE_ATTR_DEVICE:
                *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
                break;
            case MMU_PTE_ATTR_NORMAL_MEMORY:
                break;
            default:
                PANIC_UNIMPLEMENTED;
        }
        switch (pte & MMU_PTE_ATTR_AP_MASK) {
            case MMU_PTE_ATTR_AP_P_RW_U_NA:
                break;
            case MMU_PTE_ATTR_AP_P_RW_U_RW:
                *flags |= ARCH_MMU_FLAG_PERM_USER;
                break;
            case MMU_PTE_ATTR_AP_P_RO_U_NA:
                *flags |= ARCH_MMU_FLAG_PERM_RO;
                break;
            case MMU_PTE_ATTR_AP_P_RO_U_RO:
                *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO;
                break;
        }
    }
    LTRACEF("va 0x%lx, paddr 0x%lx, flags 0x%x\n",
            vaddr, paddr ? *paddr : ~0UL, flags ? *flags : ~0U);
    return NO_ERROR;
}
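
The index_shift arithmetic above encodes the arm64 translation geometry: with a 4KB granule a table holds 512 eight-byte entries, so each level resolves page_size_shift - 3 == 9 further bits of the address. A worked walk under those assumptions (shift values assume a 48-bit kernel space):

/* assumed geometry: 4KB granule, MMU_KERNEL_SIZE_SHIFT == 48
 *   level 0: index_shift 39, entry spans 512GB (table only)
 *   level 1: index_shift 30, entry spans 1GB   (table or block)
 *   level 2: index_shift 21, entry spans 2MB   (table or block)
 *   level 3: index_shift 12, entry spans 4KB   (page)
 * each loop iteration subtracts 12 - 3 == 9, i.e. log2(512) bits per table */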
Example #6
static void zybo_common_target_init(uint level)
{
    status_t err;

    /* zybo has a spiflash on qspi */
    spiflash_detect();

    bdev_t *spi = bio_open("spi0");
    if (spi) {
        /* find or create a partition table at the start of flash */
        if (ptable_scan(spi, 0) < 0) {
            ptable_create_default(spi, 0);
        }

        struct ptable_entry entry = { 0 };

        /* find and recover sysparams */
        if (ptable_find("sysparam", &entry) < 0) {
            /* didn't find sysparam partition, create it */
            ptable_add("sysparam", 0x1000, 0x1000, 0);
            ptable_find("sysparam", &entry);
        }

        if (entry.length > 0) {
            sysparam_scan(spi, entry.offset, entry.length);

#if SYSPARAM_ALLOW_WRITE
            /* for testing purposes, put at least one sysparam value in */
            if (sysparam_add("dummy", "value", sizeof("value")) >= 0) {
                sysparam_write();
            }
#endif

            sysparam_dump(true);
        }

        /* create bootloader partition if it does not exist */
        ptable_add("bootloader", 0x20000, 0x40000, 0);

        printf("flash partition table:\n");
        ptable_dump();
    }

    /* recover boot arguments */
    const char *cmdline = bootargs_get_command_line();
    if (cmdline) {
        printf("command line: '%s'\n", cmdline);
    }

    /* see if we came from a bootimage */
    uintptr_t bootimage_phys;
    size_t bootimage_size;
    if (bootargs_get_bootimage_pointer(&bootimage_phys, &bootimage_size) >= 0) {
        printf("our bootimage is at phys 0x%lx, size %zx\n", bootimage_phys, bootimage_size);

        void *ptr = paddr_to_kvaddr(bootimage_phys);
        if (ptr) {
            bootimage_t *bi;
            if (bootimage_open(ptr, bootimage_size, &bi) >= 0) {
                /* we have a valid bootimage, find the fpga section */
                const void *fpga_ptr;
                size_t fpga_len;

                if (bootimage_get_file_section(bi, TYPE_FPGA_IMAGE, &fpga_ptr, &fpga_len) >= 0) {
                    /* we have a fpga image */

                    /* lookup the physical address of the bitfile */
                    paddr_t pa = kvaddr_to_paddr((void *)fpga_ptr);
                    if (pa != 0) {
                        /* program the fpga with it */
                        printf("loading fpga image at %p (phys 0x%lx), len %zx\n", fpga_ptr, pa, fpga_len);
                        zynq_reset_fpga();
                        err = zynq_program_fpga(pa, fpga_len);
                        if (err < 0) {
                            printf("error %d loading fpga\n", err);
                        } else {
                            printf("fpga image loaded\n");
                        }
                    }
                }
            }
        }
    }

#if WITH_LIB_MINIP
    /* pull some network stack related params out of the sysparam block */
    uint8_t mac_addr[6];
    uint32_t ip_addr = IPV4_NONE;
    uint32_t ip_mask = IPV4_NONE;
    uint32_t ip_gateway = IPV4_NONE;

    if (sysparam_read("net0.mac_addr", mac_addr, sizeof(mac_addr)) < (ssize_t)sizeof(mac_addr)) {
        /* couldn't find eth address, make up a random one */
        for (size_t i = 0; i < sizeof(mac_addr); i++) {
            mac_addr[i] = rand() & 0xff;
        }

        /* unicast and locally administered */
        mac_addr[0] &= ~(1<<0);
        mac_addr[0] |= (1<<1);
    }

    uint8_t use_dhcp = 0;
    sysparam_read("net0.use_dhcp", &use_dhcp, sizeof(use_dhcp));
    sysparam_read("net0.ip_addr", &ip_addr, sizeof(ip_addr));
    sysparam_read("net0.ip_mask", &ip_mask, sizeof(ip_mask));
    sysparam_read("net0.ip_gateway", &ip_gateway, sizeof(ip_gateway));

    minip_set_macaddr(mac_addr);
    gem_set_macaddr(mac_addr);

    if (!use_dhcp && ip_addr != IPV4_NONE) {
        minip_init(gem_send_raw_pkt, NULL, ip_addr, ip_mask, ip_gateway);
    } else {
        /* Configure IP stack and hook to the driver */
        minip_init_dhcp(gem_send_raw_pkt, NULL);
    }
    gem_set_callback(minip_rx_driver_callback);
#endif
}
Example #7
/* try to boot the system from a flash partition */
status_t do_flash_boot(void)
{
    status_t err;

    LTRACE_ENTRY;

    /* construct a boot argument list */
    const size_t bootargs_size = PAGE_SIZE;
#if 0
    /* old code */
    void *args = (void *)((uintptr_t)lkb_iobuffer + lkb_iobuffer_size - bootargs_size);
    paddr_t args_phys = lkb_iobuffer_phys + lkb_iobuffer_size - bootargs_size;
#elif PLATFORM_ZYNQ
    /* grab the top page of sram */
    paddr_t args_phys = SRAM_BASE + SRAM_SIZE - bootargs_size;
    void *args = paddr_to_kvaddr(args_phys);
#else
#error need better way
#endif
    LTRACEF("boot args %p, phys 0x%lx, len %zu\n", args, args_phys, bootargs_size);

    bootargs_start(args, bootargs_size);
    bootargs_add_command_line(args, bootargs_size, "what what");
    arch_clean_cache_range((vaddr_t)args, bootargs_size);

    ulong lk_args[4];
    bootargs_generate_lk_arg_values(args_phys, lk_args);

    const void *ptr;

    if (!ptable_found_valid()) {
        TRACEF("ptable not found\n");
        return ERR_NOT_FOUND;
    }

    /* find the system partition */
    struct ptable_entry entry;
    err = ptable_find("system", &entry);
    if (err < 0) {
        TRACEF("cannot find system partition\n");
        return ERR_NOT_FOUND;
    }

    /* get a direct pointer to the device */
    bdev_t *bdev = ptable_get_device();
    if (!bdev) {
        TRACEF("error opening boot device\n");
        return ERR_NOT_FOUND;
    }

    /* convert the bdev to a memory pointer */
    err = bio_ioctl(bdev, BIO_IOCTL_GET_MEM_MAP, (void *)&ptr);
    TRACEF("err %d, ptr %p\n", err, ptr);
    if (err < 0) {
        TRACEF("error getting direct pointer to block device\n");
        return ERR_NOT_FOUND;
    }

    /* sniff it to see if it's a bootimage or a raw image */
    bootimage_t *bi;
    if (bootimage_open((char *)ptr + entry.offset, entry.length, &bi) >= 0) {
        size_t len;

        /* it's a bootimage */
        TRACEF("detected bootimage\n");

        /* find the lk image */
        if (bootimage_get_file_section(bi, TYPE_LK, &ptr, &len) >= 0) {
            TRACEF("found lk section at %p\n", ptr);

            /* add the boot image to the argument list */
            size_t bootimage_size;
            bootimage_get_range(bi, NULL, &bootimage_size);

            bootargs_add_bootimage_pointer(args, bootargs_size, bdev->name, entry.offset, bootimage_size);
        }
    } else {
        /* did not find a bootimage, abort */
        bio_ioctl(bdev, BIO_IOCTL_PUT_MEM_MAP, NULL);
        return ERR_NOT_FOUND;
    }

    TRACEF("chain loading binary at %p\n", ptr);
    arch_chain_load((void *)ptr, lk_args[0], lk_args[1], lk_args[2], lk_args[3]);

    /* put the block device back into block mode (though we never get here) */
    bio_ioctl(bdev, BIO_IOCTL_PUT_MEM_MAP, NULL);

    return NO_ERROR;
}
Example #8
static int do_boot(lkb_t *lkb, size_t len, const char **result)
{
    LTRACEF("lkb %p, len %zu, result %p\n", lkb, len, result);

    void *buf;
    paddr_t buf_phys;

    if (vmm_alloc_contiguous(vmm_get_kernel_aspace(), "lkboot_iobuf",
        len, &buf, log2_uint(1024*1024), 0, ARCH_MMU_FLAG_UNCACHED) < 0) {
        *result = "not enough memory";
        return -1;
    }
    buf_phys = vaddr_to_paddr(buf);
    LTRACEF("iobuffer %p (phys 0x%lx)\n", buf, buf_phys);

    if (lkb_read(lkb, buf, len)) {
        *result = "io error";
        // XXX free buffer here
        return -1;
    }

    /* construct a boot argument list */
    const size_t bootargs_size = PAGE_SIZE;
#if 0
    void *args = (void *)((uintptr_t)lkb_iobuffer + lkb_iobuffer_size - bootargs_size);
    paddr_t args_phys = lkb_iobuffer_phys + lkb_iobuffer_size - bootargs_size;
#elif PLATFORM_ZYNQ
    /* grab the top page of sram */
    /* XXX do this better */
    paddr_t args_phys = SRAM_BASE + SRAM_SIZE - bootargs_size;
    void *args = paddr_to_kvaddr(args_phys);
#else
#error need better way
#endif
    LTRACEF("boot args %p, phys 0x%lx, len %zu\n", args, args_phys, bootargs_size);

    bootargs_start(args, bootargs_size);
    bootargs_add_command_line(args, bootargs_size, "what what");
    arch_clean_cache_range((vaddr_t)args, bootargs_size);

    ulong lk_args[4];
    bootargs_generate_lk_arg_values(args_phys, lk_args);

    const void *ptr;

    /* sniff it to see if it's a bootimage or a raw image */
    bootimage_t *bi;
    if (bootimage_open(buf, len, &bi) >= 0) {
        size_t lk_len;

        /* it's a bootimage */
        TRACEF("detected bootimage\n");

        /* find the lk image */
        if (bootimage_get_file_section(bi, TYPE_LK, &ptr, &lk_len) >= 0) {
            TRACEF("found lk section at %p\n", ptr);

            /* add the boot image to the argument list */
            size_t bootimage_size;
            bootimage_get_range(bi, NULL, &bootimage_size);

            bootargs_add_bootimage_pointer(args, bootargs_size, "pmem", buf_phys, bootimage_size);
        }
    } else {
        /* raw image, just chain load it directly */
        TRACEF("raw image, chainloading\n");

        ptr = buf;
    }

    /* start a boot thread to complete the startup */
    static struct chainload_args cl_args;

    cl_args.func = (void *)ptr;
    cl_args.args[0] = lk_args[0];
    cl_args.args[1] = lk_args[1];
    cl_args.args[2] = lk_args[2];
    cl_args.args[3] = lk_args[3];

    thread_resume(thread_create("boot", &chainload_thread, &cl_args,
        DEFAULT_PRIORITY, DEFAULT_STACK_SIZE));

    return 0;
}
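
The snippet uses struct chainload_args and chainload_thread without showing them; a plausible shape, reconstructed from how the fields are used (hypothetical, not the original definitions):

/* hypothetical definitions matching the usage above */
struct chainload_args {
    void (*func)(ulong arg0, ulong arg1, ulong arg2, ulong arg3);
    ulong args[4];
};

static int chainload_thread(void *arg)
{
    struct chainload_args *args = (struct chainload_args *)arg;

    /* let pending console output drain before jumping to the new image */
    thread_sleep(250);

    arch_chain_load((void *)args->func, args->args[0], args->args[1],
                    args->args[2], args->args[3]);
    return 0;
}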
Example #9
int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count)
{
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));

    if (!is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    if (!IS_PAGE_ALIGNED(vaddr))
        return ERR_INVALID_ARGS;

    LTRACEF("vaddr 0x%lx count %u\n", vaddr, count);

    int unmapped = 0;
    while (count > 0) {
        uint l1_index = vaddr / SECTION_SIZE;
        uint32_t tt_entry = aspace->tt_virt[l1_index];

        switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
            case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
                /* this top level page is not mapped, move on to the next one */
                uint page_cnt = MIN((SECTION_SIZE - (vaddr % SECTION_SIZE)) / PAGE_SIZE, count);
                vaddr += page_cnt * PAGE_SIZE;
                count -= page_cnt;
                break;
            }
            case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
                if (IS_SECTION_ALIGNED(vaddr) && count >= SECTION_SIZE / PAGE_SIZE) {
                    /* we're asked to remove at least all of this section, so just zero it out */
                    // XXX test for supersection
                    arm_mmu_unmap_section(aspace, vaddr);

                    vaddr += SECTION_SIZE;
                    count -= SECTION_SIZE / PAGE_SIZE;
                    unmapped += SECTION_SIZE / PAGE_SIZE;
                } else {
                    // XXX handle unmapping just part of a section
                    // will need to convert to a L2 table and then unmap the parts we are asked to
                    PANIC_UNIMPLEMENTED;
                }
                break;
            case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
                uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                uint page_idx = (vaddr % SECTION_SIZE) / PAGE_SIZE;
                uint page_cnt = MIN((SECTION_SIZE / PAGE_SIZE) - page_idx, count);

                /* unmap page run */
                for (uint i = 0; i < page_cnt; i++) {
                    l2_table[page_idx++] = 0;
                }
                DSB;

                /* invalidate tlb */
                for (uint i = 0; i < page_cnt; i++) {
                    arm_invalidate_tlb_mva_no_barrier(vaddr);
                    vaddr += PAGE_SIZE;
                }
                count -= page_cnt;
                unmapped += page_cnt;

                /*
                 * Check whether every page covered by this L1 entry is now
                 * unmapped. We only need to inspect the entries we did not
                 * clear above, starting at page_idx and wrapping around the
                 * section.
                 */
                page_cnt = (SECTION_SIZE / PAGE_SIZE) - page_cnt;
                while (page_cnt) {
                    if (page_idx == (SECTION_SIZE / PAGE_SIZE))
                        page_idx = 0;
                    if (l2_table[page_idx++])
                        break;
                    page_cnt--;
                }
                if (!page_cnt) {
                    /* we can kill l1 entry */
                    arm_mmu_unmap_l1_entry(aspace->tt_virt, l1_index);

                    /* try to free l2 page itself */
                    put_l2_table(aspace, l1_index, MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                }
                break;
            }

            default:
                // XXX not implemented supersections or L2 tables
                PANIC_UNIMPLEMENTED;
        }
    }
    arm_after_invalidate_tlb_barrier();
    return unmapped;
}
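
The constants driving this loop come from the ARMv7 short-descriptor layout with 4KB pages; spelled out (values assumed):

/* SECTION_SIZE                1MB, one L1 entry per section
 * SECTION_SIZE / PAGE_SIZE    256 L2 entries behind each L1 page-table entry
 * l1_index = vaddr / SECTION_SIZE                -> which L1 entry
 * page_idx = (vaddr % SECTION_SIZE) / PAGE_SIZE  -> which L2 entry */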
Example #10
int arch_mmu_map(arch_aspace_t *aspace, addr_t vaddr, paddr_t paddr, uint count, uint flags)
{
    LTRACEF("vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", vaddr, paddr, count, flags);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
    if (!is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

#if !WITH_ARCH_MMU_PICK_SPOT
    if (flags & ARCH_MMU_FLAG_NS) {
        /* WITH_ARCH_MMU_PICK_SPOT is required to support NS memory */
        panic("NS mem is not supported\n");
    }
#endif

    /* paddr and vaddr must be aligned */
    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr))
        return ERR_INVALID_ARGS;

    if (count == 0)
        return NO_ERROR;

    /* see what kind of mapping we can use */
    int mapped = 0;
    while (count > 0) {
        if (IS_SECTION_ALIGNED(vaddr) && IS_SECTION_ALIGNED(paddr) && count >= SECTION_SIZE / PAGE_SIZE) {
            /* we can use a section */

            /* compute the arch flags for L1 sections */
            uint arch_flags = mmu_flags_to_l1_arch_flags(flags) |
                              MMU_MEMORY_L1_DESCRIPTOR_SECTION;

            /* map it */
            arm_mmu_map_section(aspace, paddr, vaddr, arch_flags);
            count -= SECTION_SIZE / PAGE_SIZE;
            mapped += SECTION_SIZE / PAGE_SIZE;
            vaddr += SECTION_SIZE;
            paddr += SECTION_SIZE;
        } else {
            /* will have to use a L2 mapping */
            uint l1_index = vaddr / SECTION_SIZE;
            uint32_t tt_entry = aspace->tt_virt[l1_index];

            LTRACEF("tt_entry 0x%x\n", tt_entry);
            switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
                case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
                    // XXX will have to break L1 mapping into a L2 page table
                    PANIC_UNIMPLEMENTED;
                    break;
                case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
                    paddr_t l2_pa = 0;
                    if (get_l2_table(aspace, l1_index, &l2_pa) != NO_ERROR) {
                        TRACEF("failed to allocate pagetable\n");
                        goto done;
                    }
                    tt_entry = l2_pa | MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE;
                    if (flags & ARCH_MMU_FLAG_NS)
                        tt_entry |= MMU_MEMORY_L1_PAGETABLE_NON_SECURE;

                    aspace->tt_virt[l1_index] = tt_entry;
                }
                    /* fallthrough */
                case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
                    uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                    LTRACEF("l2_table at %p\n", l2_table);

                    DEBUG_ASSERT(l2_table);

                    // XXX handle 64K pages here

                    /* compute the arch flags for L2 4K pages */
                    uint arch_flags = mmu_flags_to_l2_arch_flags_small_page(flags);

                    uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
                    do {
                        l2_table[l2_index++] = paddr | arch_flags;
                        count--;
                        mapped++;
                        vaddr += PAGE_SIZE;
                        paddr += PAGE_SIZE;
                    } while (count && (l2_index != (SECTION_SIZE / PAGE_SIZE)));
                    break;
                }
                default:
                    PANIC_UNIMPLEMENTED;
            }
        }
    }

done:
    DSB;
    return mapped;
}
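
A hedged usage sketch for this mapper (aspace, va, and pa are assumed to be set up by the caller):

/* hypothetical call: map 16 pages of device registers, uncached */
int mapped = arch_mmu_map(aspace, va, pa, 16, ARCH_MMU_FLAG_UNCACHED_DEVICE);
if (mapped < 16)
    TRACEF("short map: %d of 16 pages\n", mapped);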
Example #11
status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags)
{
    LTRACEF("aspace %p, vaddr 0x%lx\n", aspace, vaddr);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
    if (!is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

    /* Get the index into the translation table */
    uint index = vaddr / MB;

    /* decode it */
    uint32_t tt_entry = aspace->tt_virt[index];
    switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
        case MMU_MEMORY_L1_DESCRIPTOR_INVALID:
            return ERR_NOT_FOUND;
        case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
            if (tt_entry & (1<<18)) {
                /* supersection */
                PANIC_UNIMPLEMENTED;
            }

            /* section */
            if (paddr)
                *paddr = MMU_MEMORY_L1_SECTION_ADDR(tt_entry) + (vaddr & (SECTION_SIZE - 1));

            if (flags) {
                *flags = 0;
                if (tt_entry & MMU_MEMORY_L1_SECTION_NON_SECURE)
                    *flags |= ARCH_MMU_FLAG_NS;
                switch (tt_entry & MMU_MEMORY_L1_TYPE_MASK) {
                    case MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED:
                        *flags |= ARCH_MMU_FLAG_UNCACHED;
                        break;
                    case MMU_MEMORY_L1_TYPE_DEVICE_SHARED:
                    case MMU_MEMORY_L1_TYPE_DEVICE_NON_SHARED:
                        *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
                        break;
                }
                switch (tt_entry & MMU_MEMORY_L1_AP_MASK) {
                    case MMU_MEMORY_L1_AP_P_RO_U_NA:
                        *flags |= ARCH_MMU_FLAG_PERM_RO;
                        break;
                    case MMU_MEMORY_L1_AP_P_RW_U_NA:
                        break;
                    case MMU_MEMORY_L1_AP_P_RO_U_RO:
                        *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO;
                        break;
                    case MMU_MEMORY_L1_AP_P_RW_U_RW:
                        *flags |= ARCH_MMU_FLAG_PERM_USER;
                        break;
                }
                if (tt_entry & MMU_MEMORY_L1_SECTION_XN) {
                    *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
                }
            }
            break;
        case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
            uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
            uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
            uint32_t l2_entry = l2_table[l2_index];

            //LTRACEF("l2_table at %p, index %u, entry 0x%x\n", l2_table, l2_index, l2_entry);

            switch (l2_entry & MMU_MEMORY_L2_DESCRIPTOR_MASK) {
                default:
                case MMU_MEMORY_L2_DESCRIPTOR_INVALID:
                    return ERR_NOT_FOUND;
                case MMU_MEMORY_L2_DESCRIPTOR_LARGE_PAGE:
                    PANIC_UNIMPLEMENTED;
                    break;
                case MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE:
                case MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN:
                    if (paddr)
                        *paddr = MMU_MEMORY_L2_SMALL_PAGE_ADDR(l2_entry) + (vaddr & (PAGE_SIZE - 1));

                    if (flags) {
                        *flags = 0;
                        /* NS flag is only present on L1 entry */
                        if (tt_entry & MMU_MEMORY_L1_PAGETABLE_NON_SECURE)
                            *flags |= ARCH_MMU_FLAG_NS;
                        switch (l2_entry & MMU_MEMORY_L2_TYPE_MASK) {
                            case MMU_MEMORY_L2_TYPE_STRONGLY_ORDERED:
                                *flags |= ARCH_MMU_FLAG_UNCACHED;
                                break;
                            case MMU_MEMORY_L2_TYPE_DEVICE_SHARED:
                            case MMU_MEMORY_L2_TYPE_DEVICE_NON_SHARED:
                                *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
                                break;
                        }
                        switch (l2_entry & MMU_MEMORY_L2_AP_MASK) {
                            case MMU_MEMORY_L2_AP_P_RO_U_NA:
                                *flags |= ARCH_MMU_FLAG_PERM_RO;
                                break;
                            case MMU_MEMORY_L2_AP_P_RW_U_NA:
                                break;
                            case MMU_MEMORY_L2_AP_P_RO_U_RO:
                                *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO;
                                break;
                            case MMU_MEMORY_L2_AP_P_RW_U_RW:
                                *flags |= ARCH_MMU_FLAG_PERM_USER;
                                break;
                        }
                        if ((l2_entry & MMU_MEMORY_L2_DESCRIPTOR_MASK) ==
                                MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN) {
                            *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
                        }
                    }
                    break;
            }

            break;
        }
        default:
            PANIC_UNIMPLEMENTED;
    }

    return NO_ERROR;
}
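
And a matching usage sketch for the query side (variable names hypothetical):

/* hypothetical call: translate va and report its permissions */
paddr_t pa;
uint flags;
status_t err = arch_mmu_query(aspace, va, &pa, &flags);
if (err == NO_ERROR)
    printf("va 0x%lx -> pa 0x%lx%s\n", va, pa,
           (flags & ARCH_MMU_FLAG_PERM_RO) ? " (read-only)" : "");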