Example #1
/*===========================================================================*
 *                          pt_ptalloc_in_range                             *
 *===========================================================================*/
int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end,
                        u32_t flags, int verify)
{
    /* Allocate all the page tables in the range specified. */
    int pde, first_pde, last_pde;

    first_pde = ARCH_VM_PDE(start);
    last_pde = ARCH_VM_PDE(end-1);  /* end is exclusive, so back up one byte */

    assert(first_pde >= 0);
    assert(last_pde < ARCH_VM_DIR_ENTRIES);

    /* Scan all page-directory entries in the range. */
    for(pde = first_pde; pde <= last_pde; pde++) {
        assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));
        if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
            int r;
            if(verify) {
                printf("pt_ptalloc_in_range: no pde %d\n", pde);
                return EFAULT;
            }
            assert(!pt->pt_dir[pde]);
            if((r=pt_ptalloc(pt, pde, flags)) != OK) {
                /* Couldn't do (complete) mapping.
                 * Don't bother freeing any previously
                 * allocated page tables, they're
                 * still writable, don't point to nonsense,
                 * and pt_ptalloc leaves the directory
                 * and other data in a consistent state.
                 */
                return r;
            }
            assert(pt->pt_pt[pde]);
        }
        assert(pt->pt_pt[pde]);
        assert(pt->pt_dir[pde]);
        assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);
    }

    return OK;
}
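
Note the end-1 above: the range is half-open, so ARCH_VM_PDE(end) would claim
one directory entry too many whenever end falls exactly on a directory
boundary. Below is a minimal standalone sketch of the same first/last
computation; the VM_PDE macro and the constants are illustrative stand-ins,
not the real MINIX headers.

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-in: 4 KiB pages, 1024 entries per table,
 * so one page directory entry spans 4 MiB (bits 22 and up).
 */
#define VM_PDE(v)  ((unsigned)(v) >> 22)

int main(void)
{
    unsigned start = 0x003ff000;  /* last page covered by PDE 0 */
    unsigned end   = 0x00400000;  /* exclusive end: first byte of PDE 1 */

    unsigned first_pde = VM_PDE(start);
    unsigned last_pde  = VM_PDE(end - 1); /* end is exclusive, back up one */

    /* With end-1 the loop stays inside PDE 0 ... */
    assert(first_pde == 0 && last_pde == 0);
    /* ... whereas VM_PDE(end) would drag PDE 1 into the range. */
    assert(VM_PDE(end) == 1);

    printf("page tables needed for pdes %u..%u\n", first_pde, last_pde);
    return 0;
}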
Example #2
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
void pt_init(void)
{
    pt_t *newpt;
    int s, r, p;
    vir_bytes sparepages_mem;
#if defined(__arm__)
    vir_bytes sparepagedirs_mem;
#endif
    static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
    int m = kernel_boot_info.kern_mod;
#if defined(__i386__)
    int global_bit_ok = 0;
    u32_t mypdbr; /* Page Directory Base Register (cr3) value */
#elif defined(__arm__)
    u32_t myttbr;
#endif

    /* Find what the physical location of the kernel is. */
    assert(m >= 0);
    assert(m < kernel_boot_info.mods_with_kernel);
    assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
    kern_mb_mod = &kernel_boot_info.module_list[m];
    kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
    assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
    assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
    kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;

    /* Get ourselves spare pages. */
    sparepages_mem = (vir_bytes) static_sparepages;
    assert(!(sparepages_mem % VM_PAGE_SIZE));

#if defined(__arm__)
    /* Get ourselves spare pagedirs. */
    sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
    assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
#endif

    /* Spare pages are used to allocate memory before VM has its own page
     * table into which things (i.e. arbitrary physical memory) can be
     * mapped. We get them by pre-allocating space in our bss (allocated
     * and mapped in by the kernel) in static_sparepages. We also need the
     * physical addresses, though; we look them up now so they are ready
     * for use.
     */
#if defined(__arm__)
    missing_sparedirs = 0;
    assert(STATIC_SPAREPAGEDIRS <= SPAREPAGEDIRS);
    for(s = 0; s < SPAREPAGEDIRS; s++) {
        vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);
        phys_bytes ph;
        if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
                       ARCH_PAGEDIR_SIZE, &ph)) != OK)
            panic("pt_init: sys_umap failed: %d", r);
        if(s >= STATIC_SPAREPAGEDIRS) {
            sparepagedirs[s].pagedir = NULL;
            missing_sparedirs++;
            continue;
        }
        sparepagedirs[s].pagedir = (void *) v;
        sparepagedirs[s].phys = ph;
    }
#endif

    if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
        panic("reservedqueue_new for single pages failed");

    assert(STATIC_SPAREPAGES < SPAREPAGES);
    for(s = 0; s < STATIC_SPAREPAGES; s++) {
        void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
        phys_bytes ph;
        if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
                       VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
            panic("pt_init: sys_umap failed: %d", r);
        reservedqueue_add(spare_pagequeue, v, ph);
    }

#if defined(__i386__)
    /* global bit and 4MB pages available? */
    global_bit_ok = _cpufeature(_CPUF_I386_PGE);
    bigpage_ok = _cpufeature(_CPUF_I386_PSE);

    /* Set bit for PTE's and PDE's if available. */
    if(global_bit_ok)
        global_bit = I386_VM_GLOBAL;
#endif

    /* Now reserve another pde for the kernel's own mappings. */
    {
        int kernmap_pde;
        phys_bytes addr, len;
        int flags, index = 0;
        u32_t offset = 0;

        kernmap_pde = freepde();
        offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;

        while(sys_vmctl_get_mapping(index, &addr, &len,
                                    &flags) == OK)  {
            int usedpde;
            vir_bytes vir;
            if(index >= MAX_KERNMAPPINGS)
                panic("VM: too many kernel mappings: %d", index);
            kern_mappings[index].phys_addr = addr;
            kern_mappings[index].len = len;
            kern_mappings[index].vir_addr = offset;
            /* Translate the generic VMMF_* request flags into
             * architecture page-table-entry bits; start from PRESENT.
             */
            kern_mappings[index].flags = ARCH_VM_PTE_PRESENT;
            if(flags & VMMF_UNCACHED)
#if defined(__i386__)
                kern_mappings[index].flags |= PTF_NOCACHE;
#elif defined(__arm__)
                kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
#endif
            if(flags & VMMF_USER)
                kern_mappings[index].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
            else
                kern_mappings[index].flags |= ARM_VM_PTE_SUPER;
#endif
            if(flags & VMMF_WRITE)
                kern_mappings[index].flags |= ARCH_VM_PTE_RW;
#if defined(__i386__)
            if(flags & VMMF_GLO)
                kern_mappings[index].flags |= I386_VM_GLOBAL;
#elif defined(__arm__)
            /* On ARM this else binds to the VMMF_WRITE check above:
             * mappings not requested writable are made read-only.
             */
            else
                kern_mappings[index].flags |= ARCH_VM_PTE_RO;
#endif
            if(addr % VM_PAGE_SIZE)
                panic("VM: addr unaligned: %d", addr);
            if(len % VM_PAGE_SIZE)
                panic("VM: len unaligned: %d", len);
            vir = offset;
            if(sys_vmctl_reply_mapping(index, vir) != OK)
                panic("VM: reply failed");
            offset += len;
            index++;
            kernmappings++;

            usedpde = ARCH_VM_PDE(offset);
            while(usedpde > kernmap_pde) {
                int newpde = freepde();
                assert(newpde == kernmap_pde+1);
                kernmap_pde = newpde;
            }
        }
    }

    /* Reserve PDEs available for mapping in the page directories. */
    {
        int pd;
        for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
            struct pdm *pdm = &pagedir_mappings[pd];
            phys_bytes ph;
            pdm->pdeno = freepde();

            /* Allocate us a page table in which to
             * remember page directory pointers.
             */
            if(!(pdm->page_directories =
                        vm_allocpage(&ph, VMP_PAGETABLE))) {
                panic("no virt addr for vm mappings");
            }
            memset(pdm->page_directories, 0, VM_PAGE_SIZE);
            pdm->phys = ph;

#if defined(__i386__)
            pdm->val = (ph & ARCH_VM_ADDR_MASK) |
                       ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
            pdm->val = (ph & ARCH_VM_PDE_MASK)
                       | ARCH_VM_PDE_PRESENT
                       | ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif
        }
    }

    /* All right, now we have to make our own page directory and page tables,
     * which the kernel has already set up, accessible to us. It's easier to
     * understand if we just copy all the required pages (i.e. page directory
     * and page tables), and set up the pointers as if VM had done it itself.
     *
     * This allocation will happen without using any page table, and just
     * uses spare pages.
     */
    newpt = &vmprocess->vm_pt;
    if(pt_new(newpt) != OK)
        panic("vm pt_new failed");

    /* Get our current pagedir so we can see it. */
#if defined(__i386__)
    if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
#elif defined(__arm__)
    if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
#endif
        panic("VM: sys_vmctl_get_pdbr failed");
#if defined(__i386__)
    if(sys_vircopy(NONE, mypdbr, SELF,
                   (vir_bytes) currentpagedir, VM_PAGE_SIZE) != OK)
#elif defined(__arm__)
    if(sys_vircopy(NONE, myttbr, SELF,
                   (vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE) != OK)
#endif
        panic("VM: sys_vircopy failed");

    /* We have mapped in the kernel ourselves; now copy the mappings for VM
     * that the kernel made, including allocations for BSS. Skip the identity
     * mapping bits; just map in VM.
     */
    for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
        u32_t entry = currentpagedir[p];
        phys_bytes ptaddr_kern, ptaddr_us;

        /* BIGPAGEs are kernel mapping (do ourselves) or boot
         * identity mapping (don't want).
         */
        if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
        if((entry & ARCH_VM_BIGPAGE)) continue;

        if(pt_ptalloc(newpt, p, 0) != OK)
            panic("pt_ptalloc failed");
        assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);

#if defined(__i386__)
        ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
        ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
        ptaddr_kern = entry & ARCH_VM_PDE_MASK;
        ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
#endif

        /* Copy kernel-initialized pagetable contents into our
         * normally accessible pagetable.
         */
        if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
            panic("pt_init: abscopy failed");
    }

    /* Inform the kernel that VM has a newly built page table. */
    assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
    pt_bind(newpt, &vmproc[VM_PROC_NR]);

    pt_init_done = 1;

    /* All OK. */
    return;
}
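
The kernel-mappings loop in pt_init is essentially a flag translation:
generic VMMF_* request flags come in from sys_vmctl_get_mapping and
architecture page-table-entry bits come out. A standalone sketch of that
step follows (i386 flavour). The constant values and the kernmap_flags
helper are hypothetical stand-ins for illustration; the real definitions
live in the MINIX arch headers.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in values, not the real MINIX constants. */
#define VMMF_UNCACHED  0x01
#define VMMF_USER      0x02
#define VMMF_WRITE     0x04
#define VMMF_GLO       0x08

#define PTE_PRESENT    0x001
#define PTE_RW         0x002
#define PTE_USER       0x004
#define PTE_NOCACHE    0x010
#define PTE_GLOBAL     0x100

/* Mirror of the translation in pt_init's kernel-mappings loop:
 * generic mapping-request flags in, arch PTE bits out. Every kernel
 * mapping is marked present unconditionally, as in the original.
 */
static uint32_t kernmap_flags(int flags)
{
    uint32_t pte = PTE_PRESENT;
    if (flags & VMMF_UNCACHED) pte |= PTE_NOCACHE;
    if (flags & VMMF_USER)     pte |= PTE_USER;
    if (flags & VMMF_WRITE)    pte |= PTE_RW;
    if (flags & VMMF_GLO)      pte |= PTE_GLOBAL;
    return pte;
}

int main(void)
{
    /* A writable global kernel mapping: PRESENT|RW|GLOBAL = 0x103. */
    printf("0x%x\n", kernmap_flags(VMMF_WRITE | VMMF_GLO));
    return 0;
}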
Example #3
/*===========================================================================*
 *                              pt_writemap                                 *
 *===========================================================================*/
PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
                       size_t bytes, u32_t flags, u32_t writemapflags)
{
    /* Write mapping into page table. Allocate a new page table if necessary. */
    /* Page directory and table entries for this virtual address. */
    int p, pages, pdecheck;
    int finalpde;
    int verify = 0;

    if(writemapflags & WMF_VERIFY)
        verify = 1;

    vm_assert(!(bytes % I386_PAGE_SIZE));
    vm_assert(!(flags & ~(PTF_ALLFLAGS)));

    pages = bytes / I386_PAGE_SIZE;

    /* MAP_NONE means to clear the mapping. It doesn't matter
     * what's actually written into the PTE if I386_VM_PRESENT
     * isn't on, so we can just write MAP_NONE into it.
     */
#if SANITYCHECKS
    if(physaddr != MAP_NONE && !(flags & I386_VM_PRESENT)) {
        vm_panic("pt_writemap: writing dir with !P\n", NO_NUM);
    }
    if(physaddr == MAP_NONE && flags) {
        vm_panic("pt_writemap: writing 0 with flags\n", NO_NUM);
    }
#endif

    /* The range is half-open, so the last byte mapped is v + bytes - 1;
     * without the -1 we would touch one page directory entry too many
     * when the range ends exactly on a directory boundary.
     */
    finalpde = I386_VM_PDE(v + I386_PAGE_SIZE * pages - 1);

    /* First make sure all the necessary page tables are allocated,
     * before we start writing in any of them, because it's a pain
     * to undo our work properly. Walk the range in page-directory-entry
     * sized leaps.
     */
    for(pdecheck = I386_VM_PDE(v); pdecheck <= finalpde; pdecheck++) {
        vm_assert(pdecheck >= 0 && pdecheck < I386_VM_DIR_ENTRIES);
        if(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE) {
            printf("pt_writemap: trying to write 0x%lx into 0x%lx\n",
                   physaddr, v);
            vm_panic("pt_writemap: BIGPAGE found", NO_NUM);
        }
        if(!(pt->pt_dir[pdecheck] & I386_VM_PRESENT)) {
            int r;
            if(verify) {
                printf("pt_writemap verify: no pde %d\n", pdecheck);
                return EFAULT;
            }
            vm_assert(!pt->pt_dir[pdecheck]);
            if((r=pt_ptalloc(pt, pdecheck, flags)) != OK) {
                /* Couldn't do (complete) mapping.
                 * Don't bother freeing any previously
                 * allocated page tables, they're
                 * still writable, don't point to nonsense,
                 * and pt_ptalloc leaves the directory
                 * and other data in a consistent state.
                 */
                printf("pt_writemap: pt_ptalloc failed\n", pdecheck);
                return r;
            }
        }
        vm_assert(pt->pt_dir[pdecheck] & I386_VM_PRESENT);
    }

    /* Now write in them. */
    for(p = 0; p < pages; p++) {
        u32_t entry;
        int pde = I386_VM_PDE(v);
        int pte = I386_VM_PTE(v);

        vm_assert(!(v % I386_PAGE_SIZE));
        vm_assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
        vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);

        /* The page directory entry for this page table must be marked
         * present and the page table itself must be mapped in.
         */
        vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);

#if SANITYCHECKS
        /* We don't expect to overwrite a page. */
        if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
            vm_assert(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT));
#endif
        if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
            physaddr = pt->pt_pt[pde][pte] & I386_VM_ADDR_MASK;
        }

        if(writemapflags & WMF_FREE) {
            FREE_MEM(ABS2CLICK(physaddr), 1);
        }

        /* Entry we will write. */
        entry = (physaddr & I386_VM_ADDR_MASK) | flags;

        if(verify) {
            u32_t maskedentry;
            maskedentry = pt->pt_pt[pde][pte];
            maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
            /* Verify pagetable entry. */
            if(maskedentry != entry) {
                printf("pt_writemap: 0x%lx found, masked 0x%lx, 0x%lx expected\n",
                       pt->pt_pt[pde][pte], maskedentry, entry);
                return EFAULT;
            }
        } else {
            /* Write pagetable entry. */
            pt->pt_pt[pde][pte] = entry;
        }

        physaddr += I386_PAGE_SIZE;
        v += I386_PAGE_SIZE;
    }

    return OK;
}
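
The WMF_VERIFY path compares entries with I386_VM_ACC and I386_VM_DIRTY
masked out, because the MMU may set those bits at any time behind VM's
back. A standalone sketch of that comparison follows; the bit values and
the pte_matches helper are hypothetical stand-ins for the real i386
definitions.

#include <assert.h>
#include <stdint.h>

/* Hypothetical i386-style PTE bits, for illustration only. */
#define PTE_ADDR_MASK  0xfffff000u
#define PTE_PRESENT    0x001u
#define PTE_RW         0x002u
#define PTE_ACC        0x020u   /* accessed: set by the MMU */
#define PTE_DIRTY      0x040u   /* dirty: set by the MMU */

/* Mirror of the verify branch in pt_writemap: an entry still matches
 * what was written even if the MMU has since set ACC or DIRTY.
 */
static int pte_matches(uint32_t found, uint32_t physaddr, uint32_t flags)
{
    uint32_t expected = (physaddr & PTE_ADDR_MASK) | flags;
    return (found & ~(PTE_ACC | PTE_DIRTY)) == expected;
}

int main(void)
{
    uint32_t entry = 0x1234f000u | PTE_PRESENT | PTE_RW;
    /* The MMU marks the page accessed and dirty behind our back... */
    uint32_t hw = entry | PTE_ACC | PTE_DIRTY;
    /* ...but verification still succeeds once those bits are masked. */
    assert(pte_matches(hw, 0x1234f000u, PTE_PRESENT | PTE_RW));
    return 0;
}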