/* Minimal mmap() implementation: the protection, flag and file-descriptor
 * arguments are ignored; the call returns len bytes of zeroed, page-aligned
 * memory obtained from vm_allocpages().
 */
void *mmap(void *addr, size_t len, int f, int f2, int f3, off_t o)
{
	void *ret;
	phys_bytes p;

	/* Only anonymous allocations at a kernel-chosen address, in whole
	 * pages, are supported here. */
	assert(!addr);
	assert(!(len % VM_PAGE_SIZE));

	ret = vm_allocpages(&p, VMP_SLAB, len/VM_PAGE_SIZE);

	if(!ret) return MAP_FAILED;
	memset(ret, 0, len);
	return ret;
}
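For orientation, a minimal usage sketch follows; it only uses names that appear in the listing above, and the page count and error message are illustrative rather than taken from the source.

/* Hypothetical caller: addr must be NULL and len a multiple of
 * VM_PAGE_SIZE, per the asserts above; the protection/flags/fd
 * arguments are ignored by this mmap().
 */
void *buf = mmap(NULL, 4 * VM_PAGE_SIZE, 0, 0, 0, 0);
if(buf == MAP_FAILED)
	printf("VM: internal mmap failed\n");
/* On success, buf points to 4 zeroed, page-aligned pages. */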
Example 2
/*===========================================================================*
 *				pt_writemap		     		     *
 *===========================================================================*/
int pt_writemap(struct vmproc * vmp,
                pt_t *pt,
                vir_bytes v,
                phys_bytes physaddr,
                size_t bytes,
                u32_t flags,
                u32_t writemapflags)
{
    /* Write a mapping into the page table, allocating new page tables as
     * necessary. The page directory and table entries for each virtual
     * address are computed per page in the loop below.
     */
    int p, pages;
    int verify = 0;
    int ret = OK;

#ifdef CONFIG_SMP
    int vminhibit_clear = 0;
    /* FIXME
     * Don't do this every time: stop the process only on the first change
     * and resume execution on the last change. Do this in a wrapper of
     * this function.
     */
    if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
            !(vmp->vm_flags & VMF_EXITING)) {
        sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0);
        vminhibit_clear = 1;
    }
#endif

    if(writemapflags & WMF_VERIFY)
        verify = 1;

    assert(!(bytes % VM_PAGE_SIZE));
    assert(!(flags & ~(PTF_ALLFLAGS)));

    pages = bytes / VM_PAGE_SIZE;

    /* MAP_NONE means to clear the mapping. It doesn't matter
     * what's actually written into the PTE if PRESENT
     * isn't on, so we can just write MAP_NONE into it.
     */
    assert(physaddr == MAP_NONE || (flags & ARCH_VM_PTE_PRESENT));
    assert(physaddr != MAP_NONE || !flags);

    /* First make sure all the necessary page tables are allocated,
     * before we start writing in any of them, because it's a pain
     * to undo our work properly.
     */
    ret = pt_ptalloc_in_range(pt, v, v + VM_PAGE_SIZE*pages, flags, verify);
    if(ret != OK) {
        printf("VM: writemap: pt_ptalloc_in_range failed\n");
        goto resume_exit;
    }

    /* Now write in them. */
    for(p = 0; p < pages; p++) {
        u32_t entry;
        int pde = ARCH_VM_PDE(v);
        int pte = ARCH_VM_PTE(v);

        assert(!(v % VM_PAGE_SIZE));
        assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
        assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

        /* Page table has to be there. */
        assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);

        /* We do not expect it to be a bigpage. */
        assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));

        /* Make sure page directory entry for this page table
         * is marked present and page table entry is available.
         */
        assert(pt->pt_pt[pde]);

#if SANITYCHECKS
        /* We don't expect to overwrite a page. */
        if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
            assert(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT));
#endif
        if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
#if defined(__i386__)
            physaddr = pt->pt_pt[pde][pte] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
            physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK;
#endif
        }

        if(writemapflags & WMF_FREE) {
            free_mem(ABS2CLICK(physaddr), 1);
        }

        /* Entry we will write. */
#if defined(__i386__)
        entry = (physaddr & ARCH_VM_ADDR_MASK) | flags;
#elif defined(__arm__)
        entry = (physaddr & ARM_VM_PTE_MASK) | flags;
#endif

        if(verify) {
            u32_t maskedentry;
            maskedentry = pt->pt_pt[pde][pte];
#if defined(__i386__)
            maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
#endif
            /* Verify pagetable entry. */
#if defined(__i386__)
            if(entry & ARCH_VM_PTE_RW) {
                /* If we expect a writable page, allow a readonly page. */
                maskedentry |= ARCH_VM_PTE_RW;
            }
#elif defined(__arm__)
            if(!(entry & ARCH_VM_PTE_RO)) {
                /* If we expect a writable page, allow a readonly page. */
                maskedentry &= ~ARCH_VM_PTE_RO;
            }
            maskedentry &= ~(ARM_VM_PTE_WB|ARM_VM_PTE_WT);
#endif
            if(maskedentry != entry) {
                printf("pt_writemap: mismatch: ");
#if defined(__i386__)
                if((entry & ARCH_VM_ADDR_MASK) !=
                        (maskedentry & ARCH_VM_ADDR_MASK)) {
#elif defined(__arm__)
                if((entry & ARM_VM_PTE_MASK) !=
                        (maskedentry & ARM_VM_PTE_MASK)) {
#endif
                    printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
                           (long)entry, (long)maskedentry);
                } else printf("phys ok; ");
                printf(" flags: found %s; ",
                       ptestr(pt->pt_pt[pde][pte]));
                printf(" masked %s; ",
                       ptestr(maskedentry));
                printf(" expected %s\n", ptestr(entry));
                printf("found 0x%x, wanted 0x%x\n",
                       pt->pt_pt[pde][pte], entry);
                ret = EFAULT;
                goto resume_exit;
            }
        } else {
            /* Write pagetable entry. */
            pt->pt_pt[pde][pte] = entry;
        }

        physaddr += VM_PAGE_SIZE;
        v += VM_PAGE_SIZE;
    }

resume_exit:

#ifdef CONFIG_SMP
    if (vminhibit_clear) {
        assert(vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
               !(vmp->vm_flags & VMF_EXITING));
        sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0);
    }
#endif

    return ret;
}
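A hedged sketch of a typical call, using only the i386 flag spellings that appear in this listing; vmp, pt, vaddr and paddr are assumed to be set up by the caller.

/* Hypothetical call: map one page at vaddr to paddr, present and
 * writable, overwriting any existing entry. */
if(pt_writemap(vmp, pt, vaddr, paddr, VM_PAGE_SIZE,
        ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_RW, WMF_OVERWRITE) != OK)
    printf("VM: example pt_writemap call failed\n");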

/*===========================================================================*
 *				pt_checkrange		     		     *
 *===========================================================================*/
int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes, int write)
{
    int p, pages;

    assert(!(bytes % VM_PAGE_SIZE));

    pages = bytes / VM_PAGE_SIZE;

    for(p = 0; p < pages; p++) {
        int pde = ARCH_VM_PDE(v);
        int pte = ARCH_VM_PTE(v);

        assert(!(v % VM_PAGE_SIZE));
        assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
        assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

        /* Page table has to be there. */
        if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT))
            return EFAULT;

        /* Make sure page directory entry for this page table
         * is marked present and page table entry is available.
         */
        assert((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && pt->pt_pt[pde]);

        if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
            return EFAULT;
        }

#if defined(__i386__)
        if(write && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
#elif defined(__arm__)
        if(write && (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
#endif
            return EFAULT;
        }

        v += VM_PAGE_SIZE;
    }

    return OK;
}
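A small sketch of the intended use, assuming pt, vaddr and bytes come from the caller; as the assert above requires, bytes must be a multiple of VM_PAGE_SIZE.

/* Hypothetical pre-check before writing into a mapped range. */
if(pt_checkrange(pt, vaddr, bytes, 1 /* write access */) != OK)
    printf("VM: range at 0x%lx not present or not writable\n",
           (long)vaddr);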

/*===========================================================================*
 *				pt_new			     		     *
 *===========================================================================*/
int pt_new(pt_t *pt)
{
    /* Allocate a page table root: a page-aligned page directory with all
     * entries set to 0 (indicating that no page tables are allocated yet).
     * Look up its physical address, as we'll need it later, and verify
     * that it is properly aligned.
     */
    int i, r;

    /* Never re-allocate or move a process slot's page directory once it
     * has been created. This is marginally faster, but more importantly it
     * avoids having to invalidate the in-kernel page table mappings that
     * point at the page directories (the page_directories data).
     */
    if(!pt->pt_dir &&
            !(pt->pt_dir = vm_allocpages((phys_bytes *)&pt->pt_dir_phys,
                                         VMP_PAGEDIR, ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE))) {
        return ENOMEM;
    }

    assert(!((u32_t)pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

    for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++) {
        pt->pt_dir[i] = 0; /* invalid entry (PRESENT bit = 0) */
        pt->pt_pt[i] = NULL;
    }

    /* Where to start looking for free virtual address space? */
    pt->pt_virtop = 0;

    /* Map in kernel. */
    if((r=pt_mapkernel(pt)) != OK)
        return r;

    return OK;
}
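A minimal sketch of initializing a process slot's page table with pt_new(); the vm_pt field name follows the MINIX vmproc structure but should be treated as an assumption here, as should the helper's name.

/* Hypothetical helper: give a fresh process slot an empty address
 * space with the kernel already mapped in. */
int example_init_pagetable(struct vmproc *vmp)
{
    int r;

    if((r = pt_new(&vmp->vm_pt)) != OK)
        printf("VM: pt_new failed (%d)\n", r);
    return r;
}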
Example 3
/* Allocate a single page and return its virtual address; the physical
 * address is stored through *phys. Thin wrapper around vm_allocpages().
 */
void *vm_allocpage(phys_bytes *phys, int reason)
{
    return vm_allocpages(phys, reason, 1);
}
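Finally, a hedged sketch of the single-page wrapper in use; VMP_SLAB appears in the mmap() wrapper above, and the variable names are illustrative. Note that, as the mmap() example shows, the returned page is not zeroed by the allocator itself.

/* Hypothetical use: allocate one page for VM-internal data and keep
 * its physical address for a later pt_writemap() call. */
phys_bytes ph;
void *page = vm_allocpage(&ph, VMP_SLAB);
if(!page)
    printf("VM: vm_allocpage failed\n");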